| problem_id (string, length 18-22) | source (1 distinct value) | task_type (1 distinct value) | in_source_id (string, length 13-58) | prompt (string, length 1.71k-18.9k) | golden_diff (string, length 145-5.13k) | verification_info (string, length 465-23.6k) | num_tokens_prompt (int64, 556-4.1k) | num_tokens_diff (int64, 47-1.02k) |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_14713 | rasdani/github-patches | git_diff | pypa__setuptools-3309 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
package_data not doing recursive glob calls
Even though `glob` supports it, recursive `**` searching is not enabled when specifying `package_data`. This is not clear in the documentation.
https://github.com/pypa/setuptools/blob/a94ccbf404a79d56f9b171024dee361de9a948da/setuptools/command/build_py.py#L106
This means that only single `*` searches in a specific directory are supported. Enabling recursive matching in the `glob` call would let us specify `**` patterns for a directory tree instead of having to list every sub-directory in the structure.
Reference:
https://github.com/python/cpython/blob/master/Lib/glob.py#L18
</issue>
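For context, the behaviour described in the issue comes down to Python's `glob.glob`, whose `recursive` flag is off by default. Below is a minimal, self-contained sketch of the difference; the `pkg/data/**/*.json` layout and the `recursive_glob` helper are illustrative assumptions, not taken from setuptools itself.

```python
from functools import partial
from glob import glob

# Hypothetical package_data-style pattern; assumes JSON files exist under pkg/data/.
pattern = "pkg/data/**/*.json"

# Default call: '**' behaves like a plain '*', so files nested more than one
# directory deep are not matched.
print(glob(pattern))

# With recursive=True, '**' matches any number of nested directories.
print(glob(pattern, recursive=True))

# Baking the flag into a callable (for example with functools.partial) lets it
# be mapped over many patterns, which is the shape of change the issue asks for.
recursive_glob = partial(glob, recursive=True)
print(recursive_glob(pattern))
```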
<code>
[start of setuptools/command/build_py.py]
1 from glob import glob
2 from distutils.util import convert_path
3 import distutils.command.build_py as orig
4 import os
5 import fnmatch
6 import textwrap
7 import io
8 import distutils.errors
9 import itertools
10 import stat
11 from setuptools.extern.more_itertools import unique_everseen
12
13
14 def make_writable(target):
15 os.chmod(target, os.stat(target).st_mode | stat.S_IWRITE)
16
17
18 class build_py(orig.build_py):
19 """Enhanced 'build_py' command that includes data files with packages
20
21 The data files are specified via a 'package_data' argument to 'setup()'.
22 See 'setuptools.dist.Distribution' for more details.
23
24 Also, this version of the 'build_py' command allows you to specify both
25 'py_modules' and 'packages' in the same setup operation.
26 """
27
28 def finalize_options(self):
29 orig.build_py.finalize_options(self)
30 self.package_data = self.distribution.package_data
31 self.exclude_package_data = self.distribution.exclude_package_data or {}
32 if 'data_files' in self.__dict__:
33 del self.__dict__['data_files']
34 self.__updated_files = []
35
36 def run(self):
37 """Build modules, packages, and copy data files to build directory"""
38 if not self.py_modules and not self.packages:
39 return
40
41 if self.py_modules:
42 self.build_modules()
43
44 if self.packages:
45 self.build_packages()
46 self.build_package_data()
47
48 # Only compile actual .py files, using our base class' idea of what our
49 # output files are.
50 self.byte_compile(orig.build_py.get_outputs(self, include_bytecode=0))
51
52 def __getattr__(self, attr):
53 "lazily compute data files"
54 if attr == 'data_files':
55 self.data_files = self._get_data_files()
56 return self.data_files
57 return orig.build_py.__getattr__(self, attr)
58
59 def build_module(self, module, module_file, package):
60 outfile, copied = orig.build_py.build_module(self, module, module_file, package)
61 if copied:
62 self.__updated_files.append(outfile)
63 return outfile, copied
64
65 def _get_data_files(self):
66 """Generate list of '(package,src_dir,build_dir,filenames)' tuples"""
67 self.analyze_manifest()
68 return list(map(self._get_pkg_data_files, self.packages or ()))
69
70 def get_data_files_without_manifest(self):
71 """
72 Generate list of ``(package,src_dir,build_dir,filenames)`` tuples,
73 but without triggering any attempt to analyze or build the manifest.
74 """
75 # Prevent eventual errors from unset `manifest_files`
76 # (that would otherwise be set by `analyze_manifest`)
77 self.__dict__.setdefault('manifest_files', {})
78 return list(map(self._get_pkg_data_files, self.packages or ()))
79
80 def _get_pkg_data_files(self, package):
81 # Locate package source directory
82 src_dir = self.get_package_dir(package)
83
84 # Compute package build directory
85 build_dir = os.path.join(*([self.build_lib] + package.split('.')))
86
87 # Strip directory from globbed filenames
88 filenames = [
89 os.path.relpath(file, src_dir)
90 for file in self.find_data_files(package, src_dir)
91 ]
92 return package, src_dir, build_dir, filenames
93
94 def find_data_files(self, package, src_dir):
95 """Return filenames for package's data files in 'src_dir'"""
96 patterns = self._get_platform_patterns(
97 self.package_data,
98 package,
99 src_dir,
100 )
101 globs_expanded = map(glob, patterns)
102 # flatten the expanded globs into an iterable of matches
103 globs_matches = itertools.chain.from_iterable(globs_expanded)
104 glob_files = filter(os.path.isfile, globs_matches)
105 files = itertools.chain(
106 self.manifest_files.get(package, []),
107 glob_files,
108 )
109 return self.exclude_data_files(package, src_dir, files)
110
111 def build_package_data(self):
112 """Copy data files into build directory"""
113 for package, src_dir, build_dir, filenames in self.data_files:
114 for filename in filenames:
115 target = os.path.join(build_dir, filename)
116 self.mkpath(os.path.dirname(target))
117 srcfile = os.path.join(src_dir, filename)
118 outf, copied = self.copy_file(srcfile, target)
119 make_writable(target)
120 srcfile = os.path.abspath(srcfile)
121
122 def analyze_manifest(self):
123 self.manifest_files = mf = {}
124 if not self.distribution.include_package_data:
125 return
126 src_dirs = {}
127 for package in self.packages or ():
128 # Locate package source directory
129 src_dirs[assert_relative(self.get_package_dir(package))] = package
130
131 self.run_command('egg_info')
132 ei_cmd = self.get_finalized_command('egg_info')
133 for path in ei_cmd.filelist.files:
134 d, f = os.path.split(assert_relative(path))
135 prev = None
136 oldf = f
137 while d and d != prev and d not in src_dirs:
138 prev = d
139 d, df = os.path.split(d)
140 f = os.path.join(df, f)
141 if d in src_dirs:
142 if path.endswith('.py') and f == oldf:
143 continue # it's a module, not data
144 mf.setdefault(src_dirs[d], []).append(path)
145
146 def get_data_files(self):
147 pass # Lazily compute data files in _get_data_files() function.
148
149 def check_package(self, package, package_dir):
150 """Check namespace packages' __init__ for declare_namespace"""
151 try:
152 return self.packages_checked[package]
153 except KeyError:
154 pass
155
156 init_py = orig.build_py.check_package(self, package, package_dir)
157 self.packages_checked[package] = init_py
158
159 if not init_py or not self.distribution.namespace_packages:
160 return init_py
161
162 for pkg in self.distribution.namespace_packages:
163 if pkg == package or pkg.startswith(package + '.'):
164 break
165 else:
166 return init_py
167
168 with io.open(init_py, 'rb') as f:
169 contents = f.read()
170 if b'declare_namespace' not in contents:
171 raise distutils.errors.DistutilsError(
172 "Namespace package problem: %s is a namespace package, but "
173 "its\n__init__.py does not call declare_namespace()! Please "
174 'fix it.\n(See the setuptools manual under '
175 '"Namespace Packages" for details.)\n"' % (package,)
176 )
177 return init_py
178
179 def initialize_options(self):
180 self.packages_checked = {}
181 orig.build_py.initialize_options(self)
182
183 def get_package_dir(self, package):
184 res = orig.build_py.get_package_dir(self, package)
185 if self.distribution.src_root is not None:
186 return os.path.join(self.distribution.src_root, res)
187 return res
188
189 def exclude_data_files(self, package, src_dir, files):
190 """Filter filenames for package's data files in 'src_dir'"""
191 files = list(files)
192 patterns = self._get_platform_patterns(
193 self.exclude_package_data,
194 package,
195 src_dir,
196 )
197 match_groups = (fnmatch.filter(files, pattern) for pattern in patterns)
198 # flatten the groups of matches into an iterable of matches
199 matches = itertools.chain.from_iterable(match_groups)
200 bad = set(matches)
201 keepers = (fn for fn in files if fn not in bad)
202 # ditch dupes
203 return list(unique_everseen(keepers))
204
205 @staticmethod
206 def _get_platform_patterns(spec, package, src_dir):
207 """
208 yield platform-specific path patterns (suitable for glob
209 or fn_match) from a glob-based spec (such as
210 self.package_data or self.exclude_package_data)
211 matching package in src_dir.
212 """
213 raw_patterns = itertools.chain(
214 spec.get('', []),
215 spec.get(package, []),
216 )
217 return (
218 # Each pattern has to be converted to a platform-specific path
219 os.path.join(src_dir, convert_path(pattern))
220 for pattern in raw_patterns
221 )
222
223
224 def assert_relative(path):
225 if not os.path.isabs(path):
226 return path
227 from distutils.errors import DistutilsSetupError
228
229 msg = (
230 textwrap.dedent(
231 """
232 Error: setup script specifies an absolute path:
233
234 %s
235
236 setup() arguments must *always* be /-separated paths relative to the
237 setup.py directory, *never* absolute paths.
238 """
239 ).lstrip()
240 % path
241 )
242 raise DistutilsSetupError(msg)
243
[end of setuptools/command/build_py.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setuptools/command/build_py.py b/setuptools/command/build_py.py
--- a/setuptools/command/build_py.py
+++ b/setuptools/command/build_py.py
@@ -1,3 +1,4 @@
+from functools import partial
from glob import glob
from distutils.util import convert_path
import distutils.command.build_py as orig
@@ -98,7 +99,7 @@
package,
src_dir,
)
- globs_expanded = map(glob, patterns)
+ globs_expanded = map(partial(glob, recursive=True), patterns)
# flatten the expanded globs into an iterable of matches
globs_matches = itertools.chain.from_iterable(globs_expanded)
glob_files = filter(os.path.isfile, globs_matches)
| {"golden_diff": "diff --git a/setuptools/command/build_py.py b/setuptools/command/build_py.py\n--- a/setuptools/command/build_py.py\n+++ b/setuptools/command/build_py.py\n@@ -1,3 +1,4 @@\n+from functools import partial\n from glob import glob\n from distutils.util import convert_path\n import distutils.command.build_py as orig\n@@ -98,7 +99,7 @@\n package,\n src_dir,\n )\n- globs_expanded = map(glob, patterns)\n+ globs_expanded = map(partial(glob, recursive=True), patterns)\n # flatten the expanded globs into an iterable of matches\n globs_matches = itertools.chain.from_iterable(globs_expanded)\n glob_files = filter(os.path.isfile, globs_matches)\n", "issue": "package_data not doing recursive glob calls\nEven though glob supports it, glob support for recursive ** searching is not enabled when specifying package_data. This is not clear in the documentation.\r\n\r\nhttps://github.com/pypa/setuptools/blob/a94ccbf404a79d56f9b171024dee361de9a948da/setuptools/command/build_py.py#L106\r\n\r\nThis means that only single `*` searches in a specific directory are supported. By changing the `glob` function to support recursive, this would allow us to specify `**` patterns to a directory instead of having to specify every sub-directory in the structure.\r\n\r\nReference:\r\nhttps://github.com/python/cpython/blob/master/Lib/glob.py#L18\n", "before_files": [{"content": "from glob import glob\nfrom distutils.util import convert_path\nimport distutils.command.build_py as orig\nimport os\nimport fnmatch\nimport textwrap\nimport io\nimport distutils.errors\nimport itertools\nimport stat\nfrom setuptools.extern.more_itertools import unique_everseen\n\n\ndef make_writable(target):\n os.chmod(target, os.stat(target).st_mode | stat.S_IWRITE)\n\n\nclass build_py(orig.build_py):\n \"\"\"Enhanced 'build_py' command that includes data files with packages\n\n The data files are specified via a 'package_data' argument to 'setup()'.\n See 'setuptools.dist.Distribution' for more details.\n\n Also, this version of the 'build_py' command allows you to specify both\n 'py_modules' and 'packages' in the same setup operation.\n \"\"\"\n\n def finalize_options(self):\n orig.build_py.finalize_options(self)\n self.package_data = self.distribution.package_data\n self.exclude_package_data = self.distribution.exclude_package_data or {}\n if 'data_files' in self.__dict__:\n del self.__dict__['data_files']\n self.__updated_files = []\n\n def run(self):\n \"\"\"Build modules, packages, and copy data files to build directory\"\"\"\n if not self.py_modules and not self.packages:\n return\n\n if self.py_modules:\n self.build_modules()\n\n if self.packages:\n self.build_packages()\n self.build_package_data()\n\n # Only compile actual .py files, using our base class' idea of what our\n # output files are.\n self.byte_compile(orig.build_py.get_outputs(self, include_bytecode=0))\n\n def __getattr__(self, attr):\n \"lazily compute data files\"\n if attr == 'data_files':\n self.data_files = self._get_data_files()\n return self.data_files\n return orig.build_py.__getattr__(self, attr)\n\n def build_module(self, module, module_file, package):\n outfile, copied = orig.build_py.build_module(self, module, module_file, package)\n if copied:\n self.__updated_files.append(outfile)\n return outfile, copied\n\n def _get_data_files(self):\n \"\"\"Generate list of '(package,src_dir,build_dir,filenames)' tuples\"\"\"\n self.analyze_manifest()\n return list(map(self._get_pkg_data_files, self.packages or ()))\n\n def 
get_data_files_without_manifest(self):\n \"\"\"\n Generate list of ``(package,src_dir,build_dir,filenames)`` tuples,\n but without triggering any attempt to analyze or build the manifest.\n \"\"\"\n # Prevent eventual errors from unset `manifest_files`\n # (that would otherwise be set by `analyze_manifest`)\n self.__dict__.setdefault('manifest_files', {})\n return list(map(self._get_pkg_data_files, self.packages or ()))\n\n def _get_pkg_data_files(self, package):\n # Locate package source directory\n src_dir = self.get_package_dir(package)\n\n # Compute package build directory\n build_dir = os.path.join(*([self.build_lib] + package.split('.')))\n\n # Strip directory from globbed filenames\n filenames = [\n os.path.relpath(file, src_dir)\n for file in self.find_data_files(package, src_dir)\n ]\n return package, src_dir, build_dir, filenames\n\n def find_data_files(self, package, src_dir):\n \"\"\"Return filenames for package's data files in 'src_dir'\"\"\"\n patterns = self._get_platform_patterns(\n self.package_data,\n package,\n src_dir,\n )\n globs_expanded = map(glob, patterns)\n # flatten the expanded globs into an iterable of matches\n globs_matches = itertools.chain.from_iterable(globs_expanded)\n glob_files = filter(os.path.isfile, globs_matches)\n files = itertools.chain(\n self.manifest_files.get(package, []),\n glob_files,\n )\n return self.exclude_data_files(package, src_dir, files)\n\n def build_package_data(self):\n \"\"\"Copy data files into build directory\"\"\"\n for package, src_dir, build_dir, filenames in self.data_files:\n for filename in filenames:\n target = os.path.join(build_dir, filename)\n self.mkpath(os.path.dirname(target))\n srcfile = os.path.join(src_dir, filename)\n outf, copied = self.copy_file(srcfile, target)\n make_writable(target)\n srcfile = os.path.abspath(srcfile)\n\n def analyze_manifest(self):\n self.manifest_files = mf = {}\n if not self.distribution.include_package_data:\n return\n src_dirs = {}\n for package in self.packages or ():\n # Locate package source directory\n src_dirs[assert_relative(self.get_package_dir(package))] = package\n\n self.run_command('egg_info')\n ei_cmd = self.get_finalized_command('egg_info')\n for path in ei_cmd.filelist.files:\n d, f = os.path.split(assert_relative(path))\n prev = None\n oldf = f\n while d and d != prev and d not in src_dirs:\n prev = d\n d, df = os.path.split(d)\n f = os.path.join(df, f)\n if d in src_dirs:\n if path.endswith('.py') and f == oldf:\n continue # it's a module, not data\n mf.setdefault(src_dirs[d], []).append(path)\n\n def get_data_files(self):\n pass # Lazily compute data files in _get_data_files() function.\n\n def check_package(self, package, package_dir):\n \"\"\"Check namespace packages' __init__ for declare_namespace\"\"\"\n try:\n return self.packages_checked[package]\n except KeyError:\n pass\n\n init_py = orig.build_py.check_package(self, package, package_dir)\n self.packages_checked[package] = init_py\n\n if not init_py or not self.distribution.namespace_packages:\n return init_py\n\n for pkg in self.distribution.namespace_packages:\n if pkg == package or pkg.startswith(package + '.'):\n break\n else:\n return init_py\n\n with io.open(init_py, 'rb') as f:\n contents = f.read()\n if b'declare_namespace' not in contents:\n raise distutils.errors.DistutilsError(\n \"Namespace package problem: %s is a namespace package, but \"\n \"its\\n__init__.py does not call declare_namespace()! 
Please \"\n 'fix it.\\n(See the setuptools manual under '\n '\"Namespace Packages\" for details.)\\n\"' % (package,)\n )\n return init_py\n\n def initialize_options(self):\n self.packages_checked = {}\n orig.build_py.initialize_options(self)\n\n def get_package_dir(self, package):\n res = orig.build_py.get_package_dir(self, package)\n if self.distribution.src_root is not None:\n return os.path.join(self.distribution.src_root, res)\n return res\n\n def exclude_data_files(self, package, src_dir, files):\n \"\"\"Filter filenames for package's data files in 'src_dir'\"\"\"\n files = list(files)\n patterns = self._get_platform_patterns(\n self.exclude_package_data,\n package,\n src_dir,\n )\n match_groups = (fnmatch.filter(files, pattern) for pattern in patterns)\n # flatten the groups of matches into an iterable of matches\n matches = itertools.chain.from_iterable(match_groups)\n bad = set(matches)\n keepers = (fn for fn in files if fn not in bad)\n # ditch dupes\n return list(unique_everseen(keepers))\n\n @staticmethod\n def _get_platform_patterns(spec, package, src_dir):\n \"\"\"\n yield platform-specific path patterns (suitable for glob\n or fn_match) from a glob-based spec (such as\n self.package_data or self.exclude_package_data)\n matching package in src_dir.\n \"\"\"\n raw_patterns = itertools.chain(\n spec.get('', []),\n spec.get(package, []),\n )\n return (\n # Each pattern has to be converted to a platform-specific path\n os.path.join(src_dir, convert_path(pattern))\n for pattern in raw_patterns\n )\n\n\ndef assert_relative(path):\n if not os.path.isabs(path):\n return path\n from distutils.errors import DistutilsSetupError\n\n msg = (\n textwrap.dedent(\n \"\"\"\n Error: setup script specifies an absolute path:\n\n %s\n\n setup() arguments must *always* be /-separated paths relative to the\n setup.py directory, *never* absolute paths.\n \"\"\"\n ).lstrip()\n % path\n )\n raise DistutilsSetupError(msg)\n", "path": "setuptools/command/build_py.py"}]} | 3,188 | 167 |
gh_patches_debug_13024 | rasdani/github-patches | git_diff | vega__altair-2570 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Tooltip doesn't support "image" key
My understanding is that to render tooltips, Altair uses the `vega-tooltip` plugin. Per that project's [README](https://github.com/vega/vega-tooltip), one awesome feature that library has is the ability to render images as part of the tooltip with the `image` key. From the docs:
> Supports special keys title (becomes the title of the tooltip) and image (used as the url for an embedded image)
Using the tooltip without the `image` key is fine:
```
mydata = pd.DataFrame.from_records([{'a': 1, 'b': 1}, {'a': 2, 'b': 2}])
chart = alt.Chart(mydata).mark_circle().encode(
x='a',
y='b',
tooltip=alt.Tooltip(['a'], title='My Cool Tooltip')
).interactive()
display(chart)
```
However, when I add the `image` key, it stops working:
```
chart = alt.Chart(mydata).mark_circle().encode(
x='a',
y='b',
tooltip=alt.Tooltip(['a'], title='My Cool Tooltip', image='https://picsum.photos/200')
).interactive()
display(chart)
```
```
SchemaValidationError: Invalid specification
altair.vegalite.v4.schema.channels.Tooltip, validating 'additionalProperties'
Additional properties are not allowed ('image' was unexpected)
```
Maybe this feature is already supported but the schema is out of date? Thanks.
---
Please follow these steps to make it more efficient to solve your issue:
- [N/A] Since Altair is a Python wrapper around the Vega-Lite visualization grammar, [most bugs should be reported directly to Vega-Lite](https://github.com/vega/vega-lite/issues). You can click the Action Button of your Altair chart and "Open in Vega Editor" to create a reproducible Vega-Lite example and see if you get the same error in the Vega Editor.
- [x] Search for duplicate issues.
- [x] Use the latest version of Altair.
- [x] Describe how to reproduce the bug and include the full code and data to reproduce it, ideally using a sample data set from `vega_datasets`.
</issue>
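As a point of comparison, the pattern that does render an image tooltip is the one used in the example file shown further down: the data itself carries a column literally named `image`, and that column name is passed as a list to the `tooltip` encoding rather than as a keyword argument of `alt.Tooltip`. A rough sketch of that working form (URLs copied from the example file; whether `alt.Tooltip(image=...)` should also work is exactly what the issue asks):

```python
import altair as alt
import pandas as pd

source = pd.DataFrame.from_records([
    {"a": 1, "b": 1, "image": "https://altair-viz.github.io/_static/altair-logo-light.png"},
    {"a": 2, "b": 2, "image": "https://avatars.githubusercontent.com/u/11796929?s=200&v=4"},
])

# The special behaviour is keyed off the column name "image"; passing it inside
# a list is what lets vega-tooltip render the picture.
alt.Chart(source).mark_circle(size=200).encode(
    x="a",
    y="b",
    tooltip=["image"],
)
```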
<code>
[start of altair/examples/image_tooltip.py]
1 """
2 Image tooltip
3 -------------
4 This example shows how to render images in tooltips.
5 Either URLs or local file paths can be used to reference
6 the images.
7 """
8 # category: other charts
9
10 import altair as alt
11 import pandas as pd
12
13 source = pd.DataFrame.from_records(
14 [{'a': 1, 'b': 1, 'image': 'https://altair-viz.github.io/_static/altair-logo-light.png'},
15 {'a': 2, 'b': 2, 'image': 'https://avatars.githubusercontent.com/u/11796929?s=200&v=4'}]
16 )
17 alt.Chart(source).mark_circle(size=200).encode(
18 x='a',
19 y='b',
20 tooltip=['image'] # Must be a list for the image to render
21 )
22
[end of altair/examples/image_tooltip.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/altair/examples/image_tooltip.py b/altair/examples/image_tooltip.py
--- a/altair/examples/image_tooltip.py
+++ b/altair/examples/image_tooltip.py
@@ -3,7 +3,9 @@
-------------
This example shows how to render images in tooltips.
Either URLs or local file paths can be used to reference
-the images.
+the images. To render the image, you must use the special
+column name "image" in your data and pass it as a list to
+the tooltip encoding.
"""
# category: other charts
@@ -17,5 +19,5 @@
alt.Chart(source).mark_circle(size=200).encode(
x='a',
y='b',
- tooltip=['image'] # Must be a list for the image to render
+ tooltip=['image'] # Must be a list containing a field called "image"
)
| {"golden_diff": "diff --git a/altair/examples/image_tooltip.py b/altair/examples/image_tooltip.py\n--- a/altair/examples/image_tooltip.py\n+++ b/altair/examples/image_tooltip.py\n@@ -3,7 +3,9 @@\n -------------\n This example shows how to render images in tooltips.\n Either URLs or local file paths can be used to reference\n-the images.\n+the images. To render the image, you must use the special\n+column name \"image\" in your data and pass it as a list to\n+the tooltip encoding.\n \"\"\"\n # category: other charts\n \n@@ -17,5 +19,5 @@\n alt.Chart(source).mark_circle(size=200).encode(\n x='a',\n y='b',\n- tooltip=['image'] # Must be a list for the image to render\n+ tooltip=['image'] # Must be a list containing a field called \"image\"\n )\n", "issue": "Tooltip doesn't support \"image\" key\nMy understanding is that to render tooltips, Altair uses the `vega-tooltip` plugin. Per that project's [README](https://github.com/vega/vega-tooltip), one awesome feature that library has is the ability to render images as part of the tooltip with the image key. From the docs:\r\n\r\n> Supports special keys title (becomes the title of the tooltip) and image (used as the url for an embedded image)\r\n\r\nUsing the tooltip without the `image` key is fine:\r\n\r\n```\r\nmydata = pd.DataFrame.from_records([{'a': 1, 'b': 1}, {'a': 2, 'b': 2}])\r\nchart = alt.Chart(mydata).mark_circle().encode(\r\n x='a',\r\n y='b',\r\n tooltip=alt.Tooltip(['a'], title='My Cool Tooltip')\r\n).interactive()\r\n\r\ndisplay(chart)\r\n```\r\n\r\nHowever, when I add the `image` key, it stops working:\r\n\r\n```\r\nchart = alt.Chart(mydata).mark_circle().encode(\r\n x='a',\r\n y='b',\r\n tooltip=alt.Tooltip(['a'], title='My Cool Tooltip', image='https://picsum.photos/200')\r\n).interactive()\r\n\r\ndisplay(chart)\r\n```\r\n\r\n```\r\nSchemaValidationError: Invalid specification\r\n\r\n altair.vegalite.v4.schema.channels.Tooltip, validating 'additionalProperties'\r\n\r\n Additional properties are not allowed ('image' was unexpected)\r\n```\r\n\r\nMaybe this feature is already supported but the schema is out of date? Thanks.\r\n\r\n---\r\n\r\nPlease follow these steps to make it more efficient to solve your issue:\r\n\r\n- [N/A] Since Altair is a Python wrapper around the Vega-Lite visualization grammar, [most bugs should be reported directly to Vega-Lite](https://github.com/vega/vega-lite/issues). You can click the Action Button of your Altair chart and \"Open in Vega Editor\" to create a reproducible Vega-Lite example and see if you get the same error in the Vega Editor.\r\n- [x] Search for duplicate issues.\r\n- [x] Use the latest version of Altair.\r\n- [x] Describe how to reproduce the bug and include the full code and data to reproduce it, ideally using a sample data set from `vega_datasets`.\r\n\n", "before_files": [{"content": "\"\"\"\nImage tooltip\n-------------\nThis example shows how to render images in tooltips.\nEither URLs or local file paths can be used to reference\nthe images.\n\"\"\"\n# category: other charts\n\nimport altair as alt\nimport pandas as pd\n\nsource = pd.DataFrame.from_records(\n [{'a': 1, 'b': 1, 'image': 'https://altair-viz.github.io/_static/altair-logo-light.png'},\n {'a': 2, 'b': 2, 'image': 'https://avatars.githubusercontent.com/u/11796929?s=200&v=4'}]\n)\nalt.Chart(source).mark_circle(size=200).encode(\n x='a',\n y='b',\n tooltip=['image'] # Must be a list for the image to render\n)\n", "path": "altair/examples/image_tooltip.py"}]} | 1,229 | 195 |
gh_patches_debug_814 | rasdani/github-patches | git_diff | scrapy__scrapy-4311 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Consider making METAREFRESH_IGNORE_TAGS an empty list by default
As a way to allow users to fix #1422, #3768 introduced the `METAREFRESH_IGNORE_TAGS` setting.
To keep backward compatibility, the setting was introduced with `['script', 'noscript']` as default value. However, to reproduce the behavior of web browsers, it seems the right value would be `[]`.
Should we switch the default value of the `METAREFRESH_IGNORE_TAGS` setting to `[]`, even though the change breaks backward compatibility?
</issue>
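Whatever default is chosen, users can already opt into the browser-like behaviour by overriding the setting themselves. A minimal sketch of the two usual places to do that (the spider name and URL are placeholders):

```python
# settings.py (project-wide override)
METAREFRESH_IGNORE_TAGS = []  # ignore no tags: follow <meta refresh> even inside <script>/<noscript>

# ...or per spider, via custom_settings:
import scrapy


class ExampleSpider(scrapy.Spider):
    name = "example"                       # placeholder
    start_urls = ["https://example.com/"]  # placeholder
    custom_settings = {
        "METAREFRESH_IGNORE_TAGS": [],
    }

    def parse(self, response):
        yield {"url": response.url}
```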
<code>
[start of scrapy/settings/default_settings.py]
1 """
2 This module contains the default values for all settings used by Scrapy.
3
4 For more information about these settings you can read the settings
5 documentation in docs/topics/settings.rst
6
7 Scrapy developers, if you add a setting here remember to:
8
9 * add it in alphabetical order
10 * group similar settings without leaving blank lines
11 * add its documentation to the available settings documentation
12 (docs/topics/settings.rst)
13
14 """
15
16 import sys
17 from importlib import import_module
18 from os.path import join, abspath, dirname
19
20 AJAXCRAWL_ENABLED = False
21
22 ASYNCIO_REACTOR = False
23
24 AUTOTHROTTLE_ENABLED = False
25 AUTOTHROTTLE_DEBUG = False
26 AUTOTHROTTLE_MAX_DELAY = 60.0
27 AUTOTHROTTLE_START_DELAY = 5.0
28 AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
29
30 BOT_NAME = 'scrapybot'
31
32 CLOSESPIDER_TIMEOUT = 0
33 CLOSESPIDER_PAGECOUNT = 0
34 CLOSESPIDER_ITEMCOUNT = 0
35 CLOSESPIDER_ERRORCOUNT = 0
36
37 COMMANDS_MODULE = ''
38
39 COMPRESSION_ENABLED = True
40
41 CONCURRENT_ITEMS = 100
42
43 CONCURRENT_REQUESTS = 16
44 CONCURRENT_REQUESTS_PER_DOMAIN = 8
45 CONCURRENT_REQUESTS_PER_IP = 0
46
47 COOKIES_ENABLED = True
48 COOKIES_DEBUG = False
49
50 DEFAULT_ITEM_CLASS = 'scrapy.item.Item'
51
52 DEFAULT_REQUEST_HEADERS = {
53 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
54 'Accept-Language': 'en',
55 }
56
57 DEPTH_LIMIT = 0
58 DEPTH_STATS_VERBOSE = False
59 DEPTH_PRIORITY = 0
60
61 DNSCACHE_ENABLED = True
62 DNSCACHE_SIZE = 10000
63 DNS_RESOLVER = 'scrapy.resolver.CachingThreadedResolver'
64 DNS_TIMEOUT = 60
65
66 DOWNLOAD_DELAY = 0
67
68 DOWNLOAD_HANDLERS = {}
69 DOWNLOAD_HANDLERS_BASE = {
70 'data': 'scrapy.core.downloader.handlers.datauri.DataURIDownloadHandler',
71 'file': 'scrapy.core.downloader.handlers.file.FileDownloadHandler',
72 'http': 'scrapy.core.downloader.handlers.http.HTTPDownloadHandler',
73 'https': 'scrapy.core.downloader.handlers.http.HTTPDownloadHandler',
74 's3': 'scrapy.core.downloader.handlers.s3.S3DownloadHandler',
75 'ftp': 'scrapy.core.downloader.handlers.ftp.FTPDownloadHandler',
76 }
77
78 DOWNLOAD_TIMEOUT = 180 # 3mins
79
80 DOWNLOAD_MAXSIZE = 1024*1024*1024 # 1024m
81 DOWNLOAD_WARNSIZE = 32*1024*1024 # 32m
82
83 DOWNLOAD_FAIL_ON_DATALOSS = True
84
85 DOWNLOADER = 'scrapy.core.downloader.Downloader'
86
87 DOWNLOADER_HTTPCLIENTFACTORY = 'scrapy.core.downloader.webclient.ScrapyHTTPClientFactory'
88 DOWNLOADER_CLIENTCONTEXTFACTORY = 'scrapy.core.downloader.contextfactory.ScrapyClientContextFactory'
89 DOWNLOADER_CLIENT_TLS_CIPHERS = 'DEFAULT'
90 # Use highest TLS/SSL protocol version supported by the platform, also allowing negotiation:
91 DOWNLOADER_CLIENT_TLS_METHOD = 'TLS'
92 DOWNLOADER_CLIENT_TLS_VERBOSE_LOGGING = False
93
94 DOWNLOADER_MIDDLEWARES = {}
95
96 DOWNLOADER_MIDDLEWARES_BASE = {
97 # Engine side
98 'scrapy.downloadermiddlewares.robotstxt.RobotsTxtMiddleware': 100,
99 'scrapy.downloadermiddlewares.httpauth.HttpAuthMiddleware': 300,
100 'scrapy.downloadermiddlewares.downloadtimeout.DownloadTimeoutMiddleware': 350,
101 'scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware': 400,
102 'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': 500,
103 'scrapy.downloadermiddlewares.retry.RetryMiddleware': 550,
104 'scrapy.downloadermiddlewares.ajaxcrawl.AjaxCrawlMiddleware': 560,
105 'scrapy.downloadermiddlewares.redirect.MetaRefreshMiddleware': 580,
106 'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 590,
107 'scrapy.downloadermiddlewares.redirect.RedirectMiddleware': 600,
108 'scrapy.downloadermiddlewares.cookies.CookiesMiddleware': 700,
109 'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': 750,
110 'scrapy.downloadermiddlewares.stats.DownloaderStats': 850,
111 'scrapy.downloadermiddlewares.httpcache.HttpCacheMiddleware': 900,
112 # Downloader side
113 }
114
115 DOWNLOADER_STATS = True
116
117 DUPEFILTER_CLASS = 'scrapy.dupefilters.RFPDupeFilter'
118
119 EDITOR = 'vi'
120 if sys.platform == 'win32':
121 EDITOR = '%s -m idlelib.idle'
122
123 EXTENSIONS = {}
124
125 EXTENSIONS_BASE = {
126 'scrapy.extensions.corestats.CoreStats': 0,
127 'scrapy.extensions.telnet.TelnetConsole': 0,
128 'scrapy.extensions.memusage.MemoryUsage': 0,
129 'scrapy.extensions.memdebug.MemoryDebugger': 0,
130 'scrapy.extensions.closespider.CloseSpider': 0,
131 'scrapy.extensions.feedexport.FeedExporter': 0,
132 'scrapy.extensions.logstats.LogStats': 0,
133 'scrapy.extensions.spiderstate.SpiderState': 0,
134 'scrapy.extensions.throttle.AutoThrottle': 0,
135 }
136
137 FEED_TEMPDIR = None
138 FEED_URI = None
139 FEED_URI_PARAMS = None # a function to extend uri arguments
140 FEED_FORMAT = 'jsonlines'
141 FEED_STORE_EMPTY = False
142 FEED_EXPORT_ENCODING = None
143 FEED_EXPORT_FIELDS = None
144 FEED_STORAGES = {}
145 FEED_STORAGES_BASE = {
146 '': 'scrapy.extensions.feedexport.FileFeedStorage',
147 'file': 'scrapy.extensions.feedexport.FileFeedStorage',
148 'stdout': 'scrapy.extensions.feedexport.StdoutFeedStorage',
149 's3': 'scrapy.extensions.feedexport.S3FeedStorage',
150 'ftp': 'scrapy.extensions.feedexport.FTPFeedStorage',
151 }
152 FEED_EXPORTERS = {}
153 FEED_EXPORTERS_BASE = {
154 'json': 'scrapy.exporters.JsonItemExporter',
155 'jsonlines': 'scrapy.exporters.JsonLinesItemExporter',
156 'jl': 'scrapy.exporters.JsonLinesItemExporter',
157 'csv': 'scrapy.exporters.CsvItemExporter',
158 'xml': 'scrapy.exporters.XmlItemExporter',
159 'marshal': 'scrapy.exporters.MarshalItemExporter',
160 'pickle': 'scrapy.exporters.PickleItemExporter',
161 }
162 FEED_EXPORT_INDENT = 0
163
164 FEED_STORAGE_FTP_ACTIVE = False
165 FEED_STORAGE_S3_ACL = ''
166
167 FILES_STORE_S3_ACL = 'private'
168 FILES_STORE_GCS_ACL = ''
169
170 FTP_USER = 'anonymous'
171 FTP_PASSWORD = 'guest'
172 FTP_PASSIVE_MODE = True
173
174 HTTPCACHE_ENABLED = False
175 HTTPCACHE_DIR = 'httpcache'
176 HTTPCACHE_IGNORE_MISSING = False
177 HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
178 HTTPCACHE_EXPIRATION_SECS = 0
179 HTTPCACHE_ALWAYS_STORE = False
180 HTTPCACHE_IGNORE_HTTP_CODES = []
181 HTTPCACHE_IGNORE_SCHEMES = ['file']
182 HTTPCACHE_IGNORE_RESPONSE_CACHE_CONTROLS = []
183 HTTPCACHE_DBM_MODULE = 'dbm'
184 HTTPCACHE_POLICY = 'scrapy.extensions.httpcache.DummyPolicy'
185 HTTPCACHE_GZIP = False
186
187 HTTPPROXY_ENABLED = True
188 HTTPPROXY_AUTH_ENCODING = 'latin-1'
189
190 IMAGES_STORE_S3_ACL = 'private'
191 IMAGES_STORE_GCS_ACL = ''
192
193 ITEM_PROCESSOR = 'scrapy.pipelines.ItemPipelineManager'
194
195 ITEM_PIPELINES = {}
196 ITEM_PIPELINES_BASE = {}
197
198 LOG_ENABLED = True
199 LOG_ENCODING = 'utf-8'
200 LOG_FORMATTER = 'scrapy.logformatter.LogFormatter'
201 LOG_FORMAT = '%(asctime)s [%(name)s] %(levelname)s: %(message)s'
202 LOG_DATEFORMAT = '%Y-%m-%d %H:%M:%S'
203 LOG_STDOUT = False
204 LOG_LEVEL = 'DEBUG'
205 LOG_FILE = None
206 LOG_SHORT_NAMES = False
207
208 SCHEDULER_DEBUG = False
209
210 LOGSTATS_INTERVAL = 60.0
211
212 MAIL_HOST = 'localhost'
213 MAIL_PORT = 25
214 MAIL_FROM = 'scrapy@localhost'
215 MAIL_PASS = None
216 MAIL_USER = None
217
218 MEMDEBUG_ENABLED = False # enable memory debugging
219 MEMDEBUG_NOTIFY = [] # send memory debugging report by mail at engine shutdown
220
221 MEMUSAGE_CHECK_INTERVAL_SECONDS = 60.0
222 MEMUSAGE_ENABLED = True
223 MEMUSAGE_LIMIT_MB = 0
224 MEMUSAGE_NOTIFY_MAIL = []
225 MEMUSAGE_WARNING_MB = 0
226
227 METAREFRESH_ENABLED = True
228 METAREFRESH_IGNORE_TAGS = ['script', 'noscript']
229 METAREFRESH_MAXDELAY = 100
230
231 NEWSPIDER_MODULE = ''
232
233 RANDOMIZE_DOWNLOAD_DELAY = True
234
235 REACTOR_THREADPOOL_MAXSIZE = 10
236
237 REDIRECT_ENABLED = True
238 REDIRECT_MAX_TIMES = 20 # uses Firefox default setting
239 REDIRECT_PRIORITY_ADJUST = +2
240
241 REFERER_ENABLED = True
242 REFERRER_POLICY = 'scrapy.spidermiddlewares.referer.DefaultReferrerPolicy'
243
244 RETRY_ENABLED = True
245 RETRY_TIMES = 2 # initial response + 2 retries = 3 requests
246 RETRY_HTTP_CODES = [500, 502, 503, 504, 522, 524, 408, 429]
247 RETRY_PRIORITY_ADJUST = -1
248
249 ROBOTSTXT_OBEY = False
250 ROBOTSTXT_PARSER = 'scrapy.robotstxt.ProtegoRobotParser'
251 ROBOTSTXT_USER_AGENT = None
252
253 SCHEDULER = 'scrapy.core.scheduler.Scheduler'
254 SCHEDULER_DISK_QUEUE = 'scrapy.squeues.PickleLifoDiskQueue'
255 SCHEDULER_MEMORY_QUEUE = 'scrapy.squeues.LifoMemoryQueue'
256 SCHEDULER_PRIORITY_QUEUE = 'scrapy.pqueues.ScrapyPriorityQueue'
257
258 SCRAPER_SLOT_MAX_ACTIVE_SIZE = 5000000
259
260 SPIDER_LOADER_CLASS = 'scrapy.spiderloader.SpiderLoader'
261 SPIDER_LOADER_WARN_ONLY = False
262
263 SPIDER_MIDDLEWARES = {}
264
265 SPIDER_MIDDLEWARES_BASE = {
266 # Engine side
267 'scrapy.spidermiddlewares.httperror.HttpErrorMiddleware': 50,
268 'scrapy.spidermiddlewares.offsite.OffsiteMiddleware': 500,
269 'scrapy.spidermiddlewares.referer.RefererMiddleware': 700,
270 'scrapy.spidermiddlewares.urllength.UrlLengthMiddleware': 800,
271 'scrapy.spidermiddlewares.depth.DepthMiddleware': 900,
272 # Spider side
273 }
274
275 SPIDER_MODULES = []
276
277 STATS_CLASS = 'scrapy.statscollectors.MemoryStatsCollector'
278 STATS_DUMP = True
279
280 STATSMAILER_RCPTS = []
281
282 TEMPLATES_DIR = abspath(join(dirname(__file__), '..', 'templates'))
283
284 URLLENGTH_LIMIT = 2083
285
286 USER_AGENT = 'Scrapy/%s (+https://scrapy.org)' % import_module('scrapy').__version__
287
288 TELNETCONSOLE_ENABLED = 1
289 TELNETCONSOLE_PORT = [6023, 6073]
290 TELNETCONSOLE_HOST = '127.0.0.1'
291 TELNETCONSOLE_USERNAME = 'scrapy'
292 TELNETCONSOLE_PASSWORD = None
293
294 SPIDER_CONTRACTS = {}
295 SPIDER_CONTRACTS_BASE = {
296 'scrapy.contracts.default.UrlContract': 1,
297 'scrapy.contracts.default.CallbackKeywordArgumentsContract': 1,
298 'scrapy.contracts.default.ReturnsContract': 2,
299 'scrapy.contracts.default.ScrapesContract': 3,
300 }
301
[end of scrapy/settings/default_settings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scrapy/settings/default_settings.py b/scrapy/settings/default_settings.py
--- a/scrapy/settings/default_settings.py
+++ b/scrapy/settings/default_settings.py
@@ -225,7 +225,7 @@
MEMUSAGE_WARNING_MB = 0
METAREFRESH_ENABLED = True
-METAREFRESH_IGNORE_TAGS = ['script', 'noscript']
+METAREFRESH_IGNORE_TAGS = []
METAREFRESH_MAXDELAY = 100
NEWSPIDER_MODULE = ''
| {"golden_diff": "diff --git a/scrapy/settings/default_settings.py b/scrapy/settings/default_settings.py\n--- a/scrapy/settings/default_settings.py\n+++ b/scrapy/settings/default_settings.py\n@@ -225,7 +225,7 @@\n MEMUSAGE_WARNING_MB = 0\n \n METAREFRESH_ENABLED = True\n-METAREFRESH_IGNORE_TAGS = ['script', 'noscript']\n+METAREFRESH_IGNORE_TAGS = []\n METAREFRESH_MAXDELAY = 100\n \n NEWSPIDER_MODULE = ''\n", "issue": "Consider making METAREFRESH_IGNORE_TAGS an empty list by default\nAs a way to allow users to fix #1422, #3768 introduced the `METAREFRESH_IGNORE_TAGS` setting.\r\n\r\nTo keep backward compatibility, the setting was introduced with `['script', 'noscript']` as default value. However, to reproduce the behavior of web browsers, it seems the right value would be `[]`.\r\n\r\nShould we switch the default value of the `METAREFRESH_IGNORE_TAGS` setting to `[]`, even though the change breaks backward compatibility?\n", "before_files": [{"content": "\"\"\"\nThis module contains the default values for all settings used by Scrapy.\n\nFor more information about these settings you can read the settings\ndocumentation in docs/topics/settings.rst\n\nScrapy developers, if you add a setting here remember to:\n\n* add it in alphabetical order\n* group similar settings without leaving blank lines\n* add its documentation to the available settings documentation\n (docs/topics/settings.rst)\n\n\"\"\"\n\nimport sys\nfrom importlib import import_module\nfrom os.path import join, abspath, dirname\n\nAJAXCRAWL_ENABLED = False\n\nASYNCIO_REACTOR = False\n\nAUTOTHROTTLE_ENABLED = False\nAUTOTHROTTLE_DEBUG = False\nAUTOTHROTTLE_MAX_DELAY = 60.0\nAUTOTHROTTLE_START_DELAY = 5.0\nAUTOTHROTTLE_TARGET_CONCURRENCY = 1.0\n\nBOT_NAME = 'scrapybot'\n\nCLOSESPIDER_TIMEOUT = 0\nCLOSESPIDER_PAGECOUNT = 0\nCLOSESPIDER_ITEMCOUNT = 0\nCLOSESPIDER_ERRORCOUNT = 0\n\nCOMMANDS_MODULE = ''\n\nCOMPRESSION_ENABLED = True\n\nCONCURRENT_ITEMS = 100\n\nCONCURRENT_REQUESTS = 16\nCONCURRENT_REQUESTS_PER_DOMAIN = 8\nCONCURRENT_REQUESTS_PER_IP = 0\n\nCOOKIES_ENABLED = True\nCOOKIES_DEBUG = False\n\nDEFAULT_ITEM_CLASS = 'scrapy.item.Item'\n\nDEFAULT_REQUEST_HEADERS = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Language': 'en',\n}\n\nDEPTH_LIMIT = 0\nDEPTH_STATS_VERBOSE = False\nDEPTH_PRIORITY = 0\n\nDNSCACHE_ENABLED = True\nDNSCACHE_SIZE = 10000\nDNS_RESOLVER = 'scrapy.resolver.CachingThreadedResolver'\nDNS_TIMEOUT = 60\n\nDOWNLOAD_DELAY = 0\n\nDOWNLOAD_HANDLERS = {}\nDOWNLOAD_HANDLERS_BASE = {\n 'data': 'scrapy.core.downloader.handlers.datauri.DataURIDownloadHandler',\n 'file': 'scrapy.core.downloader.handlers.file.FileDownloadHandler',\n 'http': 'scrapy.core.downloader.handlers.http.HTTPDownloadHandler',\n 'https': 'scrapy.core.downloader.handlers.http.HTTPDownloadHandler',\n 's3': 'scrapy.core.downloader.handlers.s3.S3DownloadHandler',\n 'ftp': 'scrapy.core.downloader.handlers.ftp.FTPDownloadHandler',\n}\n\nDOWNLOAD_TIMEOUT = 180 # 3mins\n\nDOWNLOAD_MAXSIZE = 1024*1024*1024 # 1024m\nDOWNLOAD_WARNSIZE = 32*1024*1024 # 32m\n\nDOWNLOAD_FAIL_ON_DATALOSS = True\n\nDOWNLOADER = 'scrapy.core.downloader.Downloader'\n\nDOWNLOADER_HTTPCLIENTFACTORY = 'scrapy.core.downloader.webclient.ScrapyHTTPClientFactory'\nDOWNLOADER_CLIENTCONTEXTFACTORY = 'scrapy.core.downloader.contextfactory.ScrapyClientContextFactory'\nDOWNLOADER_CLIENT_TLS_CIPHERS = 'DEFAULT'\n# Use highest TLS/SSL protocol version supported by the platform, also allowing negotiation:\nDOWNLOADER_CLIENT_TLS_METHOD = 
'TLS'\nDOWNLOADER_CLIENT_TLS_VERBOSE_LOGGING = False\n\nDOWNLOADER_MIDDLEWARES = {}\n\nDOWNLOADER_MIDDLEWARES_BASE = {\n # Engine side\n 'scrapy.downloadermiddlewares.robotstxt.RobotsTxtMiddleware': 100,\n 'scrapy.downloadermiddlewares.httpauth.HttpAuthMiddleware': 300,\n 'scrapy.downloadermiddlewares.downloadtimeout.DownloadTimeoutMiddleware': 350,\n 'scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware': 400,\n 'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': 500,\n 'scrapy.downloadermiddlewares.retry.RetryMiddleware': 550,\n 'scrapy.downloadermiddlewares.ajaxcrawl.AjaxCrawlMiddleware': 560,\n 'scrapy.downloadermiddlewares.redirect.MetaRefreshMiddleware': 580,\n 'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 590,\n 'scrapy.downloadermiddlewares.redirect.RedirectMiddleware': 600,\n 'scrapy.downloadermiddlewares.cookies.CookiesMiddleware': 700,\n 'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': 750,\n 'scrapy.downloadermiddlewares.stats.DownloaderStats': 850,\n 'scrapy.downloadermiddlewares.httpcache.HttpCacheMiddleware': 900,\n # Downloader side\n}\n\nDOWNLOADER_STATS = True\n\nDUPEFILTER_CLASS = 'scrapy.dupefilters.RFPDupeFilter'\n\nEDITOR = 'vi'\nif sys.platform == 'win32':\n EDITOR = '%s -m idlelib.idle'\n\nEXTENSIONS = {}\n\nEXTENSIONS_BASE = {\n 'scrapy.extensions.corestats.CoreStats': 0,\n 'scrapy.extensions.telnet.TelnetConsole': 0,\n 'scrapy.extensions.memusage.MemoryUsage': 0,\n 'scrapy.extensions.memdebug.MemoryDebugger': 0,\n 'scrapy.extensions.closespider.CloseSpider': 0,\n 'scrapy.extensions.feedexport.FeedExporter': 0,\n 'scrapy.extensions.logstats.LogStats': 0,\n 'scrapy.extensions.spiderstate.SpiderState': 0,\n 'scrapy.extensions.throttle.AutoThrottle': 0,\n}\n\nFEED_TEMPDIR = None\nFEED_URI = None\nFEED_URI_PARAMS = None # a function to extend uri arguments\nFEED_FORMAT = 'jsonlines'\nFEED_STORE_EMPTY = False\nFEED_EXPORT_ENCODING = None\nFEED_EXPORT_FIELDS = None\nFEED_STORAGES = {}\nFEED_STORAGES_BASE = {\n '': 'scrapy.extensions.feedexport.FileFeedStorage',\n 'file': 'scrapy.extensions.feedexport.FileFeedStorage',\n 'stdout': 'scrapy.extensions.feedexport.StdoutFeedStorage',\n 's3': 'scrapy.extensions.feedexport.S3FeedStorage',\n 'ftp': 'scrapy.extensions.feedexport.FTPFeedStorage',\n}\nFEED_EXPORTERS = {}\nFEED_EXPORTERS_BASE = {\n 'json': 'scrapy.exporters.JsonItemExporter',\n 'jsonlines': 'scrapy.exporters.JsonLinesItemExporter',\n 'jl': 'scrapy.exporters.JsonLinesItemExporter',\n 'csv': 'scrapy.exporters.CsvItemExporter',\n 'xml': 'scrapy.exporters.XmlItemExporter',\n 'marshal': 'scrapy.exporters.MarshalItemExporter',\n 'pickle': 'scrapy.exporters.PickleItemExporter',\n}\nFEED_EXPORT_INDENT = 0\n\nFEED_STORAGE_FTP_ACTIVE = False\nFEED_STORAGE_S3_ACL = ''\n\nFILES_STORE_S3_ACL = 'private'\nFILES_STORE_GCS_ACL = ''\n\nFTP_USER = 'anonymous'\nFTP_PASSWORD = 'guest'\nFTP_PASSIVE_MODE = True\n\nHTTPCACHE_ENABLED = False\nHTTPCACHE_DIR = 'httpcache'\nHTTPCACHE_IGNORE_MISSING = False\nHTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'\nHTTPCACHE_EXPIRATION_SECS = 0\nHTTPCACHE_ALWAYS_STORE = False\nHTTPCACHE_IGNORE_HTTP_CODES = []\nHTTPCACHE_IGNORE_SCHEMES = ['file']\nHTTPCACHE_IGNORE_RESPONSE_CACHE_CONTROLS = []\nHTTPCACHE_DBM_MODULE = 'dbm'\nHTTPCACHE_POLICY = 'scrapy.extensions.httpcache.DummyPolicy'\nHTTPCACHE_GZIP = False\n\nHTTPPROXY_ENABLED = True\nHTTPPROXY_AUTH_ENCODING = 'latin-1'\n\nIMAGES_STORE_S3_ACL = 'private'\nIMAGES_STORE_GCS_ACL = ''\n\nITEM_PROCESSOR = 
'scrapy.pipelines.ItemPipelineManager'\n\nITEM_PIPELINES = {}\nITEM_PIPELINES_BASE = {}\n\nLOG_ENABLED = True\nLOG_ENCODING = 'utf-8'\nLOG_FORMATTER = 'scrapy.logformatter.LogFormatter'\nLOG_FORMAT = '%(asctime)s [%(name)s] %(levelname)s: %(message)s'\nLOG_DATEFORMAT = '%Y-%m-%d %H:%M:%S'\nLOG_STDOUT = False\nLOG_LEVEL = 'DEBUG'\nLOG_FILE = None\nLOG_SHORT_NAMES = False\n\nSCHEDULER_DEBUG = False\n\nLOGSTATS_INTERVAL = 60.0\n\nMAIL_HOST = 'localhost'\nMAIL_PORT = 25\nMAIL_FROM = 'scrapy@localhost'\nMAIL_PASS = None\nMAIL_USER = None\n\nMEMDEBUG_ENABLED = False # enable memory debugging\nMEMDEBUG_NOTIFY = [] # send memory debugging report by mail at engine shutdown\n\nMEMUSAGE_CHECK_INTERVAL_SECONDS = 60.0\nMEMUSAGE_ENABLED = True\nMEMUSAGE_LIMIT_MB = 0\nMEMUSAGE_NOTIFY_MAIL = []\nMEMUSAGE_WARNING_MB = 0\n\nMETAREFRESH_ENABLED = True\nMETAREFRESH_IGNORE_TAGS = ['script', 'noscript']\nMETAREFRESH_MAXDELAY = 100\n\nNEWSPIDER_MODULE = ''\n\nRANDOMIZE_DOWNLOAD_DELAY = True\n\nREACTOR_THREADPOOL_MAXSIZE = 10\n\nREDIRECT_ENABLED = True\nREDIRECT_MAX_TIMES = 20 # uses Firefox default setting\nREDIRECT_PRIORITY_ADJUST = +2\n\nREFERER_ENABLED = True\nREFERRER_POLICY = 'scrapy.spidermiddlewares.referer.DefaultReferrerPolicy'\n\nRETRY_ENABLED = True\nRETRY_TIMES = 2 # initial response + 2 retries = 3 requests\nRETRY_HTTP_CODES = [500, 502, 503, 504, 522, 524, 408, 429]\nRETRY_PRIORITY_ADJUST = -1\n\nROBOTSTXT_OBEY = False\nROBOTSTXT_PARSER = 'scrapy.robotstxt.ProtegoRobotParser'\nROBOTSTXT_USER_AGENT = None\n\nSCHEDULER = 'scrapy.core.scheduler.Scheduler'\nSCHEDULER_DISK_QUEUE = 'scrapy.squeues.PickleLifoDiskQueue'\nSCHEDULER_MEMORY_QUEUE = 'scrapy.squeues.LifoMemoryQueue'\nSCHEDULER_PRIORITY_QUEUE = 'scrapy.pqueues.ScrapyPriorityQueue'\n\nSCRAPER_SLOT_MAX_ACTIVE_SIZE = 5000000\n\nSPIDER_LOADER_CLASS = 'scrapy.spiderloader.SpiderLoader'\nSPIDER_LOADER_WARN_ONLY = False\n\nSPIDER_MIDDLEWARES = {}\n\nSPIDER_MIDDLEWARES_BASE = {\n # Engine side\n 'scrapy.spidermiddlewares.httperror.HttpErrorMiddleware': 50,\n 'scrapy.spidermiddlewares.offsite.OffsiteMiddleware': 500,\n 'scrapy.spidermiddlewares.referer.RefererMiddleware': 700,\n 'scrapy.spidermiddlewares.urllength.UrlLengthMiddleware': 800,\n 'scrapy.spidermiddlewares.depth.DepthMiddleware': 900,\n # Spider side\n}\n\nSPIDER_MODULES = []\n\nSTATS_CLASS = 'scrapy.statscollectors.MemoryStatsCollector'\nSTATS_DUMP = True\n\nSTATSMAILER_RCPTS = []\n\nTEMPLATES_DIR = abspath(join(dirname(__file__), '..', 'templates'))\n\nURLLENGTH_LIMIT = 2083\n\nUSER_AGENT = 'Scrapy/%s (+https://scrapy.org)' % import_module('scrapy').__version__\n\nTELNETCONSOLE_ENABLED = 1\nTELNETCONSOLE_PORT = [6023, 6073]\nTELNETCONSOLE_HOST = '127.0.0.1'\nTELNETCONSOLE_USERNAME = 'scrapy'\nTELNETCONSOLE_PASSWORD = None\n\nSPIDER_CONTRACTS = {}\nSPIDER_CONTRACTS_BASE = {\n 'scrapy.contracts.default.UrlContract': 1,\n 'scrapy.contracts.default.CallbackKeywordArgumentsContract': 1,\n 'scrapy.contracts.default.ReturnsContract': 2,\n 'scrapy.contracts.default.ScrapesContract': 3,\n}\n", "path": "scrapy/settings/default_settings.py"}]} | 4,052 | 108 |
gh_patches_debug_17870 | rasdani/github-patches | git_diff | Flexget__Flexget-2378 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
descargas 2020 broken (again)
### Expected behaviour:
It downloads the torrent properly.
### Actual behaviour:
### Steps to reproduce:
1. Add this RSS feed: http://feeds.feedburner.com/descargas2020new
2. Run `flexget execute`.
3. You will get:
2019-04-28 23:00 WARNING urlrewriter series_sigo URL rewriting descargas2020 failed: Unable to locate torrent ID from url http://descargas2020.com/descargar-torrent/121574_-1556477420-55-pasos--blurayrip/
2019-04-28 23:00 ERROR entry series_sigo Failed 55 Pasos [BluRay Rip][AC3 2.0 Castellano][2019][] (None)
#### Config:
```
tasks:
series_sigo:
rss:
url: http://feeds.feedburner.com/descargas2020new
link: link
all_series: yes
seen: local
regexp:
reject:
- PCDVD
accept_all: yes
thetvdb_lookup: yes
no_entries_ok: yes
set:
filename: "{{title|pathscrub}}.torrent"
download: /home/flexget/torrent/
```
#### Log:
```
2019-04-28 23:00 WARNING urlrewriter series_sigo URL rewriting descargas2020 failed: Unable to locate torrent ID from url http://descargas2020.com/descargar-torrent/121574_-1556477420-55-pasos--blurayrip/
2019-04-28 23:00 ERROR entry series_sigo Failed 55 Pasos [BluRay Rip][AC3 2.0 Castellano][2019][] (None)
```
</details>
### Additional information:
- FlexGet version: 2.20.23
- Python version: 2.7.13
- Installation method: i don't remember
- Using daemon (yes/no): no
- OS and version: debian 9 x64
- Link to crash log:
<!---
In config and debug/crash logs, remember to redact any personal or sensitive information such as passwords, API keys, private URLs and so on.
Please verify that the following data is present before submitting your issue:
- Link to a paste service or paste above the relevant config (preferably full config, including templates if present). Please make sure the paste does not expire, if possible.
- Link to a paste service or paste above debug-level logs of the relevant task/s (use `flexget -L debug execute --tasks <Task_name>`).
- FlexGet version (use `flexget -V` to get it).
- Full Python version, for example `2.7.11` (use `python -V` to get it). Note that FlexGet is not supported for use with Python v3.0, 3.1, 3.2 or 3.6.
- Installation method (pip, git install, etc).
- Whether or not you're running FlexGet as a daemon.
- OS and version.
- Attach crash log if one was generated, in addition to the debug-level log. It can be found in the directory with your config file.
--->
</issue>
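To make the failure concrete, the offending URL from the log can be checked against the site pattern used by the plugin, plus a second pattern that excludes direct `descargar-torrent` links; that second pattern mirrors the fix shown in the diff at the end of this example rather than the code as it currently stands.

```python
import re

# Site pattern copied from the plugin source below.
REWRITABLE_REGEX = re.compile(
    r'https?://(www.)?(descargas2020|tvsinpagar|tumejortorrent|torrentlocura|torrentrapid).com/'
)
# Pattern for URLs that are already direct torrent links (mirrors the proposed fix).
NONREWRITABLE_REGEX = re.compile(r'(.*/descargar-torrent/|.*\.torrent$)')

# URL copied from the warning in the log above.
url = "http://descargas2020.com/descargar-torrent/121574_-1556477420-55-pasos--blurayrip/"

print(bool(REWRITABLE_REGEX.match(url)))     # True: it is a descargas2020 URL...
print(bool(NONREWRITABLE_REGEX.match(url)))  # True: ...but a direct torrent link, so it should not be rewritten
```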
<code>
[start of flexget/components/sites/sites/descargas2020.py]
1 from __future__ import unicode_literals, division, absolute_import
2 from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
3
4 import logging
5 import re
6
7 from flexget import plugin
8 from flexget.event import event
9 from flexget.components.sites.urlrewriting import UrlRewritingError
10 from flexget.utils.requests import Session, TimedLimiter
11 from flexget.utils.soup import get_soup
12 from flexget.utils import requests
13
14 from flexget.entry import Entry
15 from flexget.components.sites.utils import normalize_unicode
16
17 import unicodedata
18
19 log = logging.getLogger('descargas2020')
20
21 DESCARGAS2020_TORRENT_FORMAT = 'http://descargas2020.com/download/{:0>6}.torrent'
22 REWRITABLE_REGEX = re.compile(
23 r'https?://(www.)?(descargas2020|tvsinpagar|tumejortorrent|torrentlocura|torrentrapid).com/'
24 )
25
26
27 class UrlRewriteDescargas2020(object):
28 """Descargas2020 urlrewriter and search."""
29
30 schema = {'type': 'boolean', 'default': False}
31
32 def __init__(self):
33 self._session = None
34
35 @property
36 def session(self):
37 # TODO: This is not used for all requests even ..
38 if self._session is None:
39 self._session = Session()
40 self._session.headers.update(
41 {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'}
42 )
43 self._session.add_domain_limiter(TimedLimiter('descargas2020.com', '2 seconds'))
44 return self._session
45
46 # urlrewriter API
47 def url_rewritable(self, task, entry):
48 url = entry['url']
49 return not url.endswith('.torrent') and REWRITABLE_REGEX.match(url)
50
51 # urlrewriter API
52 def url_rewrite(self, task, entry):
53 entry['url'] = self.parse_download_page(entry['url'], task)
54
55 @plugin.internet(log)
56 def parse_download_page(self, url, task):
57 log.verbose('Descargas2020 URL: %s', url)
58
59 try:
60 page = self.session.get(url)
61 except requests.RequestException as e:
62 raise UrlRewritingError(e)
63 try:
64 soup = get_soup(page.text)
65 except Exception as e:
66 raise UrlRewritingError(e)
67
68 torrent_id = None
69 url_format = DESCARGAS2020_TORRENT_FORMAT
70
71 torrent_id_prog = re.compile(
72 r"(?:parametros\s*=\s*\n?)\s*{\s*\n(?:\s*'\w+'\s*:.*\n)+\s*'(?:torrentID|id)'\s*:\s*'(\d+)'"
73 )
74 torrent_ids = soup.findAll(text=torrent_id_prog)
75 if torrent_ids:
76 match = torrent_id_prog.search(torrent_ids[0])
77 if match:
78 torrent_id = match.group(1)
79 if not torrent_id:
80 log.debug('torrent ID not found, searching openTorrent script')
81 match = re.search(
82 r'function openTorrent.*\n.*\{.*(\n.*)+window\.location\.href =\s*\".*\/(\d+.*)\";',
83 page.text,
84 re.MULTILINE,
85 )
86 if match:
87 torrent_id = match.group(2).rstrip('/')
88
89 if not torrent_id:
90 raise UrlRewritingError('Unable to locate torrent ID from url %s' % url)
91
92 return url_format.format(torrent_id)
93
94 def search(self, task, entry, config=None):
95 if not config:
96 log.debug('Descargas2020 disabled')
97 return set()
98 log.debug('Search Descargas2020')
99 url_search = 'http://descargas2020.com/buscar'
100 results = set()
101 for search_string in entry.get('search_strings', [entry['title']]):
102 query = normalize_unicode(search_string)
103 query = re.sub(r' \(\d\d\d\d\)$', '', query)
104 log.debug('Searching Descargas2020 %s', query)
105 query = unicodedata.normalize('NFD', query).encode('ascii', 'ignore')
106 data = {'q': query}
107 try:
108 response = task.requests.post(url_search, data=data)
109 except requests.RequestException as e:
110 log.error('Error searching Descargas2020: %s', e)
111 return results
112 content = response.content
113 soup = get_soup(content)
114 soup2 = soup.find('ul', attrs={'class': 'buscar-list'})
115 children = soup2.findAll('a', href=True)
116 for child in children:
117 entry = Entry()
118 entry['url'] = child['href']
119 entry_title = child.find('h2')
120 if entry_title is None:
121 log.debug('Ignore empty entry')
122 continue
123 entry_title = entry_title.text
124 if not entry_title:
125 continue
126 try:
127 entry_quality_lan = re.search(
128 r'.+ \[([^\]]+)\](\[[^\]]+\])+$', entry_title
129 ).group(1)
130 except AttributeError:
131 log.debug('Quality not found')
132 continue
133 entry_title = re.sub(r' \[.+]$', '', entry_title)
134 entry['title'] = entry_title + ' ' + entry_quality_lan
135 results.add(entry)
136 log.debug('Finish search Descargas2020 with %d entries', len(results))
137 return results
138
139
140 @event('plugin.register')
141 def register_plugin():
142 plugin.register(
143 UrlRewriteDescargas2020, 'descargas2020', interfaces=['urlrewriter', 'search'], api_ver=2
144 )
145
[end of flexget/components/sites/sites/descargas2020.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/flexget/components/sites/sites/descargas2020.py b/flexget/components/sites/sites/descargas2020.py
--- a/flexget/components/sites/sites/descargas2020.py
+++ b/flexget/components/sites/sites/descargas2020.py
@@ -22,7 +22,9 @@
REWRITABLE_REGEX = re.compile(
r'https?://(www.)?(descargas2020|tvsinpagar|tumejortorrent|torrentlocura|torrentrapid).com/'
)
-
+NONREWRITABLE_REGEX = re.compile(
+ r'(.*/descargar-torrent/|.*\.torrent$)'
+)
class UrlRewriteDescargas2020(object):
"""Descargas2020 urlrewriter and search."""
@@ -46,7 +48,7 @@
# urlrewriter API
def url_rewritable(self, task, entry):
url = entry['url']
- return not url.endswith('.torrent') and REWRITABLE_REGEX.match(url)
+ return REWRITABLE_REGEX.match(url) and not NONREWRITABLE_REGEX.match(url)
# urlrewriter API
def url_rewrite(self, task, entry):
| {"golden_diff": "diff --git a/flexget/components/sites/sites/descargas2020.py b/flexget/components/sites/sites/descargas2020.py\n--- a/flexget/components/sites/sites/descargas2020.py\n+++ b/flexget/components/sites/sites/descargas2020.py\n@@ -22,7 +22,9 @@\n REWRITABLE_REGEX = re.compile(\n r'https?://(www.)?(descargas2020|tvsinpagar|tumejortorrent|torrentlocura|torrentrapid).com/'\n )\n-\n+NONREWRITABLE_REGEX = re.compile(\n+ r'(.*/descargar-torrent/|.*\\.torrent$)'\n+)\n \n class UrlRewriteDescargas2020(object):\n \"\"\"Descargas2020 urlrewriter and search.\"\"\"\n@@ -46,7 +48,7 @@\n # urlrewriter API\n def url_rewritable(self, task, entry):\n url = entry['url']\n- return not url.endswith('.torrent') and REWRITABLE_REGEX.match(url)\n+ return REWRITABLE_REGEX.match(url) and not NONREWRITABLE_REGEX.match(url)\n \n # urlrewriter API\n def url_rewrite(self, task, entry):\n", "issue": "descargas 2020 broken (again)\n### Expected behaviour:\r\nit downloads torrent properly.\r\n\r\n### Actual behaviour:\r\n\r\n### Steps to reproduce:\r\nadd this rss http://feeds.feedburner.com/descargas2020new\r\nflexget execute\r\nyou will get:\r\n2019-04-28 23:00 WARNING urlrewriter series_sigo URL rewriting descargas2020 failed: Unable to locate torrent ID from url http://descargas2020.com/descargar-torrent/121574_-1556477420-55-pasos--blurayrip/\r\n2019-04-28 23:00 ERROR entry series_sigo Failed 55 Pasos [BluRay Rip][AC3 2.0 Castellano][2019][] (None)\r\n\r\n#### Config:\r\n\r\n```\r\ntasks:\r\n series_sigo:\r\n rss:\r\n url: http://feeds.feedburner.com/descargas2020new\r\n link: link\r\n all_series: yes\r\n seen: local\r\n regexp:\r\n reject:\r\n - PCDVD\r\n accept_all: yes\r\n thetvdb_lookup: yes\r\n no_entries_ok: yes\r\n set:\r\n filename: \"{{title|pathscrub}}.torrent\"\r\n download: /home/flexget/torrent/\r\n```\r\n \r\n#### Log:\r\n\r\n\r\n2019-04-28 23:00 WARNING urlrewriter series_sigo URL rewriting descargas2020 failed: Unable to locate torrent ID from url http://descargas2020.com/descargar-torrent/121574_-1556477420-55-pasos--blurayrip/\r\n2019-04-28 23:00 ERROR entry series_sigo Failed 55 Pasos [BluRay Rip][AC3 2.0 Castellano][2019][] (None)\r\n\r\n\r\n```\r\npaste log output here\r\n```\r\n</details>\r\n\r\n### Additional information:\r\n\r\n- FlexGet version: 2.20.23\r\n- Python version: 2.7.13\r\n- Installation method: i don't remember\r\n- Using daemon (yes/no): no\r\n- OS and version: debian 9 x64\r\n- Link to crash log:\r\n\r\n<!---\r\nIn config and debug/crash logs, remember to redact any personal or sensitive information such as passwords, API keys, private URLs and so on.\r\n\r\nPlease verify that the following data is present before submitting your issue:\r\n\r\n- Link to a paste service or paste above the relevant config (preferably full config, including templates if present). Please make sure the paste does not expire, if possible.\r\n- Link to a paste service or paste above debug-level logs of the relevant task/s (use `flexget -L debug execute --tasks <Task_name>`).\r\n- FlexGet version (use `flexget -V` to get it).\r\n- Full Python version, for example `2.7.11` (use `python -V` to get it). Note that FlexGet is not supported for use with Python v3.0, 3.1, 3.2 or 3.6.\r\n- Installation method (pip, git install, etc).\r\n- Whether or not you're running FlexGet as a daemon.\r\n- OS and version.\r\n- Attach crash log if one was generated, in addition to the debug-level log. 
It can be found in the directory with your config file.\r\n--->\r\n\n", "before_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # noqa pylint: disable=unused-import, redefined-builtin\n\nimport logging\nimport re\n\nfrom flexget import plugin\nfrom flexget.event import event\nfrom flexget.components.sites.urlrewriting import UrlRewritingError\nfrom flexget.utils.requests import Session, TimedLimiter\nfrom flexget.utils.soup import get_soup\nfrom flexget.utils import requests\n\nfrom flexget.entry import Entry\nfrom flexget.components.sites.utils import normalize_unicode\n\nimport unicodedata\n\nlog = logging.getLogger('descargas2020')\n\nDESCARGAS2020_TORRENT_FORMAT = 'http://descargas2020.com/download/{:0>6}.torrent'\nREWRITABLE_REGEX = re.compile(\n r'https?://(www.)?(descargas2020|tvsinpagar|tumejortorrent|torrentlocura|torrentrapid).com/'\n)\n\n\nclass UrlRewriteDescargas2020(object):\n \"\"\"Descargas2020 urlrewriter and search.\"\"\"\n\n schema = {'type': 'boolean', 'default': False}\n\n def __init__(self):\n self._session = None\n\n @property\n def session(self):\n # TODO: This is not used for all requests even ..\n if self._session is None:\n self._session = Session()\n self._session.headers.update(\n {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'}\n )\n self._session.add_domain_limiter(TimedLimiter('descargas2020.com', '2 seconds'))\n return self._session\n\n # urlrewriter API\n def url_rewritable(self, task, entry):\n url = entry['url']\n return not url.endswith('.torrent') and REWRITABLE_REGEX.match(url)\n\n # urlrewriter API\n def url_rewrite(self, task, entry):\n entry['url'] = self.parse_download_page(entry['url'], task)\n\n @plugin.internet(log)\n def parse_download_page(self, url, task):\n log.verbose('Descargas2020 URL: %s', url)\n\n try:\n page = self.session.get(url)\n except requests.RequestException as e:\n raise UrlRewritingError(e)\n try:\n soup = get_soup(page.text)\n except Exception as e:\n raise UrlRewritingError(e)\n\n torrent_id = None\n url_format = DESCARGAS2020_TORRENT_FORMAT\n\n torrent_id_prog = re.compile(\n r\"(?:parametros\\s*=\\s*\\n?)\\s*{\\s*\\n(?:\\s*'\\w+'\\s*:.*\\n)+\\s*'(?:torrentID|id)'\\s*:\\s*'(\\d+)'\"\n )\n torrent_ids = soup.findAll(text=torrent_id_prog)\n if torrent_ids:\n match = torrent_id_prog.search(torrent_ids[0])\n if match:\n torrent_id = match.group(1)\n if not torrent_id:\n log.debug('torrent ID not found, searching openTorrent script')\n match = re.search(\n r'function openTorrent.*\\n.*\\{.*(\\n.*)+window\\.location\\.href =\\s*\\\".*\\/(\\d+.*)\\\";',\n page.text,\n re.MULTILINE,\n )\n if match:\n torrent_id = match.group(2).rstrip('/')\n\n if not torrent_id:\n raise UrlRewritingError('Unable to locate torrent ID from url %s' % url)\n\n return url_format.format(torrent_id)\n\n def search(self, task, entry, config=None):\n if not config:\n log.debug('Descargas2020 disabled')\n return set()\n log.debug('Search Descargas2020')\n url_search = 'http://descargas2020.com/buscar'\n results = set()\n for search_string in entry.get('search_strings', [entry['title']]):\n query = normalize_unicode(search_string)\n query = re.sub(r' \\(\\d\\d\\d\\d\\)$', '', query)\n log.debug('Searching Descargas2020 %s', query)\n query = unicodedata.normalize('NFD', query).encode('ascii', 'ignore')\n data = {'q': query}\n try:\n response = task.requests.post(url_search, data=data)\n except requests.RequestException as e:\n log.error('Error searching Descargas2020: %s', e)\n 
return results\n content = response.content\n soup = get_soup(content)\n soup2 = soup.find('ul', attrs={'class': 'buscar-list'})\n children = soup2.findAll('a', href=True)\n for child in children:\n entry = Entry()\n entry['url'] = child['href']\n entry_title = child.find('h2')\n if entry_title is None:\n log.debug('Ignore empty entry')\n continue\n entry_title = entry_title.text\n if not entry_title:\n continue\n try:\n entry_quality_lan = re.search(\n r'.+ \\[([^\\]]+)\\](\\[[^\\]]+\\])+$', entry_title\n ).group(1)\n except AttributeError:\n log.debug('Quality not found')\n continue\n entry_title = re.sub(r' \\[.+]$', '', entry_title)\n entry['title'] = entry_title + ' ' + entry_quality_lan\n results.add(entry)\n log.debug('Finish search Descargas2020 with %d entries', len(results))\n return results\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(\n UrlRewriteDescargas2020, 'descargas2020', interfaces=['urlrewriter', 'search'], api_ver=2\n )\n", "path": "flexget/components/sites/sites/descargas2020.py"}]} | 2,940 | 283 |
gh_patches_debug_2489 | rasdani/github-patches | git_diff | rucio__rucio-2776 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Account and Scope new types
Motivation
----------
For multi-VO, the internal representation of scope and account will need to be different from the external representation. The translations between them should be done in a consistent way, and this can be prepared beforehand.
Modification
------------
Create a new type for each of scope and account.
Convert incoming account names and scopes to these types in the API directory so that multi-VO changes are more straightforward.
If normal strings are used in core, raise an error.
</issue>
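A minimal sketch of the pattern the issue asks for (illustrative only; `InternalAccount` and `core_list_rules` are hypothetical names, not Rucio code): wrapping the value in a dedicated type lets the API layer perform the external-to-internal translation once, while core code can reject plain strings that bypassed it.

```python
class InternalAccount(str):
    """Marker type for account names translated at the API boundary."""


def core_list_rules(account):
    # Core code refuses plain strings that skipped the API-layer conversion.
    if not isinstance(account, InternalAccount):
        raise TypeError('expected InternalAccount, got plain %s' % type(account).__name__)
    return []


core_list_rules(InternalAccount('root'))   # ok
# core_list_rules('root')                  # would raise TypeError
```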
<code>
[start of lib/rucio/vcsversion.py]
1
2 '''
3 This file is automatically generated; Do not edit it. :)
4 '''
5 VERSION_INFO = {
6 'final': True,
7 'version': '1.20.3',
8 'branch_nick': 'patch-0-Release__Rucio_1_20_3_preparation',
9 'revision_id': 'f05e019f7178590718bf3f1eee415cc46cb59159',
10 'revno': 8410
11 }
12
[end of lib/rucio/vcsversion.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lib/rucio/vcsversion.py b/lib/rucio/vcsversion.py
--- a/lib/rucio/vcsversion.py
+++ b/lib/rucio/vcsversion.py
@@ -4,8 +4,8 @@
'''
VERSION_INFO = {
'final': True,
- 'version': '1.20.3',
- 'branch_nick': 'patch-0-Release__Rucio_1_20_3_preparation',
- 'revision_id': 'f05e019f7178590718bf3f1eee415cc46cb59159',
- 'revno': 8410
+ 'version': '1.20.4rc1',
+ 'branch_nick': 'patch-0-Release__1_20_4rc1_preparation',
+ 'revision_id': '525812b8f83f1069d38ab78aebedb732f21e77ec',
+ 'revno': 8418
}
| {"golden_diff": "diff --git a/lib/rucio/vcsversion.py b/lib/rucio/vcsversion.py\n--- a/lib/rucio/vcsversion.py\n+++ b/lib/rucio/vcsversion.py\n@@ -4,8 +4,8 @@\n '''\n VERSION_INFO = {\n 'final': True,\n- 'version': '1.20.3',\n- 'branch_nick': 'patch-0-Release__Rucio_1_20_3_preparation',\n- 'revision_id': 'f05e019f7178590718bf3f1eee415cc46cb59159',\n- 'revno': 8410\n+ 'version': '1.20.4rc1',\n+ 'branch_nick': 'patch-0-Release__1_20_4rc1_preparation',\n+ 'revision_id': '525812b8f83f1069d38ab78aebedb732f21e77ec',\n+ 'revno': 8418\n }\n", "issue": "Account and Scope new types\nMotivation\r\n----------\r\nFor multi-vo the internal representation of scope and account will need to be different from the external representation. The translations for these should be done in a consistent way and this can be prepared beforehand.\r\n\r\n\r\nModification\r\n------------\r\nCreate a new type for each of scope and account. \r\nConvert incoming account names and scopes to these types in the API directory so that multi-vo changes are more straight forward.\r\nIf normal strings are used in core, raise an error.\r\n\n", "before_files": [{"content": "\n'''\nThis file is automatically generated; Do not edit it. :)\n'''\nVERSION_INFO = {\n 'final': True,\n 'version': '1.20.3',\n 'branch_nick': 'patch-0-Release__Rucio_1_20_3_preparation',\n 'revision_id': 'f05e019f7178590718bf3f1eee415cc46cb59159',\n 'revno': 8410\n}\n", "path": "lib/rucio/vcsversion.py"}]} | 776 | 254 |
gh_patches_debug_1579 | rasdani/github-patches | git_diff | elastic__apm-agent-python-1758 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
KeyError: 'db' when capturing Elasticsearch span context
**Describe the bug**: ...
**To Reproduce**
1. Instrument via capture serverless
2. Perform an async_scan:
```python
async with AsyncElasticsearch(**elastic_context) as client:
async for doc in async_scan(
client=client,
index='XXX',
query=query,
_source=['field']
):
```

**Environment (please complete the following information)**
- OS: debian:bullseye-slim (python:3.9-slim container image), x86
- Python version: 3.9
- Framework and version: N/A (AWS Lambda function)
- APM Server version: 8.6.1
- Agent version: 6.14.0
**Additional context**
Add any other context about the problem here.
- Agent config options <!-- be careful not to post sensitive information -->
<details>
<summary>Click to expand</summary>
```
ELASTIC_APM_LOG_LEVEL: warning
ELASTIC_APM_LAMBDA_APM_SERVER: https://<elastic-cloud-hostname>:443
ELASTIC_APM_SECRET_TOKEN: ${env:ELASTIC_APM_SECRET_TOKEN}
ELASTIC_APM_ENVIRONMENT: ${self:provider.stage}
ELASTIC_APM_SPAN_COMPRESSION_SAME_KIND_MAX_DURATION: 50ms
```
</details>
- `requirements.txt`:
<details>
<summary>Click to expand</summary>
```toml
[tool.poetry.dependencies]
python = "^3.9"
elasticsearch = "^8"
elastic-apm = "^6.14"
tomlkit = "^0.11.1"
uvloop = "^0.17.0"
PyYAML = "^6.0"
aiobotocore = "^2.3"
urllib3 = "^1.26.10"
python-dateutil = "^2.8.2"
pyjarm = "^0.0.5"
aiohttp = "^3.8"
multidict = "^6"
```
</details>
</issue>
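Stripped of the agent internals, the traceback in the screenshot reduces to assigning into a nested dict whose parent key was never created. A framework-free sketch of the failure and of a defensive write (illustrative only, not the agent's code):

```python
context = {}  # stands in for span.context before any "db" data has been recorded

try:
    context["db"]["rows_affected"] = 42   # parent key missing -> KeyError: 'db'
except KeyError as exc:
    print("KeyError:", exc)

# Creating the parent entry first avoids the crash.
context.setdefault("db", {})["rows_affected"] = 42
print(context)   # {'db': {'rows_affected': 42}}
```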
<code>
[start of elasticapm/instrumentation/packages/asyncio/elasticsearch.py]
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2019, Elasticsearch BV
4 # All rights reserved.
5 #
6 # Redistribution and use in source and binary forms, with or without
7 # modification, are permitted provided that the following conditions are met:
8 #
9 # * Redistributions of source code must retain the above copyright notice, this
10 # list of conditions and the following disclaimer.
11 #
12 # * Redistributions in binary form must reproduce the above copyright notice,
13 # this list of conditions and the following disclaimer in the documentation
14 # and/or other materials provided with the distribution.
15 #
16 # * Neither the name of the copyright holder nor the names of its
17 # contributors may be used to endorse or promote products derived from
18 # this software without specific prior written permission.
19 #
20 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31 import elasticapm
32 from elasticapm.instrumentation.packages.asyncio.base import AsyncAbstractInstrumentedModule
33 from elasticapm.instrumentation.packages.elasticsearch import (
34 ElasticsearchConnectionInstrumentation,
35 ElasticsearchTransportInstrumentation,
36 )
37 from elasticapm.traces import DroppedSpan, execution_context
38
39
40 class ElasticSearchAsyncConnection(ElasticsearchConnectionInstrumentation, AsyncAbstractInstrumentedModule):
41 name = "elasticsearch_connection"
42
43 def get_instrument_list(self):
44 try:
45 import elastic_transport # noqa: F401
46
47 return [
48 ("elastic_transport._node._http_aiohttp", "AiohttpHttpNode.perform_request"),
49 ]
50 except ImportError:
51 return [
52 ("elasticsearch_async.connection", "AIOHttpConnection.perform_request"),
53 ("elasticsearch._async.http_aiohttp", "AIOHttpConnection.perform_request"),
54 ]
55
56 async def call(self, module, method, wrapped, instance, args, kwargs):
57 span = execution_context.get_span()
58 if not span or isinstance(span, DroppedSpan):
59 return await wrapped(*args, **kwargs)
60
61 self._update_context_by_request_data(span.context, instance, args, kwargs)
62
63 result = await wrapped(*args, **kwargs)
64 if hasattr(result, "meta"): # elasticsearch-py 8.x+
65 status_code = result.meta.status
66 else:
67 status_code = result[0]
68
69 span.context["http"] = {"status_code": status_code}
70
71 return result
72
73
74 class ElasticsearchAsyncTransportInstrumentation(
75 ElasticsearchTransportInstrumentation, AsyncAbstractInstrumentedModule
76 ):
77 name = "elasticsearch_connection"
78
79 instrument_list = [
80 ("elasticsearch._async.transport", "AsyncTransport.perform_request"),
81 ]
82
83 def get_instrument_list(self):
84 try:
85 import elastic_transport # noqa: F401
86
87 return [
88 ("elastic_transport", "AsyncTransport.perform_request"),
89 ]
90 except ImportError:
91 return [
92 ("elasticsearch._async.transport", "AsyncTransport.perform_request"),
93 ]
94
95 async def call(self, module, method, wrapped, instance, args, kwargs):
96 async with elasticapm.async_capture_span(
97 self._get_signature(args, kwargs),
98 span_type="db",
99 span_subtype="elasticsearch",
100 span_action="query",
101 extra={},
102 skip_frames=2,
103 leaf=True,
104 ) as span:
105 result_data = await wrapped(*args, **kwargs)
106
107 hits = self._get_hits(result_data)
108 if hits:
109 span.context["db"]["rows_affected"] = hits
110
111 return result_data
112
[end of elasticapm/instrumentation/packages/asyncio/elasticsearch.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/elasticapm/instrumentation/packages/asyncio/elasticsearch.py b/elasticapm/instrumentation/packages/asyncio/elasticsearch.py
--- a/elasticapm/instrumentation/packages/asyncio/elasticsearch.py
+++ b/elasticapm/instrumentation/packages/asyncio/elasticsearch.py
@@ -106,6 +106,6 @@
hits = self._get_hits(result_data)
if hits:
- span.context["db"]["rows_affected"] = hits
+ span.update_context("db", {"rows_affected": hits})
return result_data
| {"golden_diff": "diff --git a/elasticapm/instrumentation/packages/asyncio/elasticsearch.py b/elasticapm/instrumentation/packages/asyncio/elasticsearch.py\n--- a/elasticapm/instrumentation/packages/asyncio/elasticsearch.py\n+++ b/elasticapm/instrumentation/packages/asyncio/elasticsearch.py\n@@ -106,6 +106,6 @@\n \n hits = self._get_hits(result_data)\n if hits:\n- span.context[\"db\"][\"rows_affected\"] = hits\n+ span.update_context(\"db\", {\"rows_affected\": hits})\n \n return result_data\n", "issue": "KeyError:'db' when capturing elasticsearch span context\n**Describe the bug**: ...\r\n\r\n**To Reproduce**\r\n\r\n1. Instrument via capture serverless\r\n2. Perform an async_scan:\r\n```python\r\nasync with AsyncElasticsearch(**elastic_context) as client:\r\n async for doc in async_scan(\r\n client=client,\r\n index='XXX',\r\n query=query,\r\n _source=['field']\r\n ):\r\n```\r\n\r\n\r\n\r\n**Environment (please complete the following information)**\r\n- OS: [e.g. Linux] debian:bullseye-slim (python:3.9-slim container image) x86\r\n- Python version: 3.9\r\n- Framework and version [e.g. Django 2.1]: N/A AWS Lambda Function\r\n- APM Server version: 8.6.1\r\n- Agent version: 6.14.0\r\n\r\n\r\n**Additional context**\r\n\r\nAdd any other context about the problem here.\r\n\r\n- Agent config options <!-- be careful not to post sensitive information -->\r\n <details>\r\n <summary>Click to expand</summary>\r\n\r\n ```\r\n ELASTIC_APM_LOG_LEVEL: warning\r\n ELASTIC_APM_LAMBDA_APM_SERVER: https://<elastic-cloud-hostname>:443\r\n ELASTIC_APM_SECRET_TOKEN: ${env:ELASTIC_APM_SECRET_TOKEN}\r\n ELASTIC_APM_ENVIRONMENT: ${self:provider.stage}\r\n ELASTIC_APM_SPAN_COMPRESSION_SAME_KIND_MAX_DURATION: 50ms\r\n ```\r\n </details>\r\n- `requirements.txt`:\r\n <details>\r\n <summary>Click to expand</summary>\r\n\r\n ```toml\r\n [tool.poetry.dependencies]\r\n python = \"^3.9\"\r\n elasticsearch = \"^8\"\r\n elastic-apm = \"^6.14\"\r\n tomlkit = \"^0.11.1\"\r\n uvloop = \"^0.17.0\"\r\n PyYAML = \"^6.0\"\r\n aiobotocore = \"^2.3\"\r\n urllib3 = \"^1.26.10\"\r\n python-dateutil = \"^2.8.2\"\r\n pyjarm = \"^0.0.5\"\r\n aiohttp = \"^3.8\"\r\n multidict = \"^6\"\r\n ```\r\n </details>\r\n\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport elasticapm\nfrom elasticapm.instrumentation.packages.asyncio.base import AsyncAbstractInstrumentedModule\nfrom elasticapm.instrumentation.packages.elasticsearch import (\n ElasticsearchConnectionInstrumentation,\n ElasticsearchTransportInstrumentation,\n)\nfrom elasticapm.traces import DroppedSpan, execution_context\n\n\nclass ElasticSearchAsyncConnection(ElasticsearchConnectionInstrumentation, AsyncAbstractInstrumentedModule):\n name = \"elasticsearch_connection\"\n\n def get_instrument_list(self):\n try:\n import elastic_transport # noqa: F401\n\n return [\n (\"elastic_transport._node._http_aiohttp\", \"AiohttpHttpNode.perform_request\"),\n ]\n except ImportError:\n return [\n (\"elasticsearch_async.connection\", \"AIOHttpConnection.perform_request\"),\n (\"elasticsearch._async.http_aiohttp\", \"AIOHttpConnection.perform_request\"),\n ]\n\n async def call(self, module, method, wrapped, instance, args, kwargs):\n span = execution_context.get_span()\n if not span or isinstance(span, DroppedSpan):\n return await wrapped(*args, **kwargs)\n\n self._update_context_by_request_data(span.context, instance, args, kwargs)\n\n result = await wrapped(*args, **kwargs)\n if hasattr(result, \"meta\"): # elasticsearch-py 8.x+\n status_code = result.meta.status\n else:\n status_code = result[0]\n\n span.context[\"http\"] = {\"status_code\": status_code}\n\n return result\n\n\nclass ElasticsearchAsyncTransportInstrumentation(\n ElasticsearchTransportInstrumentation, AsyncAbstractInstrumentedModule\n):\n name = \"elasticsearch_connection\"\n\n instrument_list = [\n (\"elasticsearch._async.transport\", \"AsyncTransport.perform_request\"),\n ]\n\n def get_instrument_list(self):\n try:\n import elastic_transport # noqa: F401\n\n return [\n (\"elastic_transport\", \"AsyncTransport.perform_request\"),\n ]\n except ImportError:\n return [\n (\"elasticsearch._async.transport\", \"AsyncTransport.perform_request\"),\n ]\n\n async def call(self, module, method, wrapped, instance, args, kwargs):\n async with elasticapm.async_capture_span(\n self._get_signature(args, kwargs),\n span_type=\"db\",\n span_subtype=\"elasticsearch\",\n span_action=\"query\",\n extra={},\n skip_frames=2,\n leaf=True,\n ) as span:\n result_data = await wrapped(*args, **kwargs)\n\n hits = self._get_hits(result_data)\n if hits:\n span.context[\"db\"][\"rows_affected\"] = hits\n\n return result_data\n", "path": "elasticapm/instrumentation/packages/asyncio/elasticsearch.py"}]} | 2,231 | 132 |
gh_patches_debug_6983 | rasdani/github-patches | git_diff | frappe__frappe-6179 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
"StartTLS is not supported"-Bug in LDAP integrations
`self` is used in the module-level function `authenticate_ldap_user`, which results in the Python exception:
```
NameError: global name 'self' is not defined
```
https://github.com/frappe/frappe/blob/6811f0345e35ea521942db4b29425dd71e403ab8/frappe/integrations/doctype/ldap_settings/ldap_settings.py#L89
Since all exceptions are caught regardless of their type, a misleading "StartTLS is not supported" error is thrown:
https://github.com/frappe/frappe/blob/6811f0345e35ea521942db4b29425dd71e403ab8/frappe/integrations/doctype/ldap_settings/ldap_settings.py#L95
</issue>
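A stripped-down sketch of the failure mode (illustrative only, not the Frappe code itself): the `NameError` fires first, but the blanket `except` re-labels it as a TLS problem.

```python
def connect(settings):
    try:
        # 'self' does not exist in a module-level function, so this raises NameError ...
        if self.ssl_tls_mode == 'StartTLS':
            pass
    except Exception:
        # ... yet the broad handler reports it as a TLS failure instead.
        raise RuntimeError('StartTLS is not supported')


try:
    connect(object())
except RuntimeError as exc:
    print(exc)   # "StartTLS is not supported", even though TLS was never attempted
```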
<code>
[start of frappe/integrations/doctype/ldap_settings/ldap_settings.py]
1 # -*- coding: utf-8 -*-
2 # Copyright (c) 2015, Frappe Technologies and contributors
3 # For license information, please see license.txt
4
5 from __future__ import unicode_literals
6 import frappe
7 from frappe import _
8 from frappe.utils import cstr
9 from frappe.model.document import Document
10
11 class LDAPSettings(Document):
12 def validate(self):
13 if not self.flags.ignore_mandatory:
14 self.validate_ldap_credentails()
15
16 def validate_ldap_credentails(self):
17 try:
18 import ldap
19 conn = ldap.initialize(self.ldap_server_url)
20 try:
21 if self.ssl_tls_mode == 'StartTLS':
22 conn.set_option(ldap.OPT_X_TLS_DEMAND, True)
23 if self.require_trusted_certificate == 'Yes':
24 conn.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_DEMAND)
25 conn.start_tls_s()
26 except:
27 frappe.throw(_("StartTLS is not supported"))
28
29 conn.simple_bind_s(self.base_dn, self.get_password(raise_exception=False))
30 except ImportError:
31 msg = """
32 <div>
33 {{_("Seems ldap is not installed on system.<br>Guidelines to install ldap dependancies and python package")}},
34 <a href="https://discuss.erpnext.com/t/frappe-v-7-1-beta-ldap-dependancies/15841" target="_blank">{{_("Click here")}}</a>,
35 </div>
36 """
37 frappe.throw(msg, title=_("LDAP Not Installed"))
38
39 except ldap.LDAPError:
40 conn.unbind_s()
41 frappe.throw(_("Incorrect UserId or Password"))
42
43 def get_ldap_settings():
44 try:
45 settings = frappe.get_doc("LDAP Settings")
46
47 settings.update({
48 "method": "frappe.integrations.doctype.ldap_settings.ldap_settings.login"
49 })
50 return settings
51 except Exception:
52 # this will return blank settings
53 return frappe._dict()
54
55 @frappe.whitelist(allow_guest=True)
56 def login():
57 #### LDAP LOGIN LOGIC #####
58 args = frappe.form_dict
59 user = authenticate_ldap_user(frappe.as_unicode(args.usr), frappe.as_unicode(args.pwd))
60
61 frappe.local.login_manager.user = user.name
62 frappe.local.login_manager.post_login()
63
64 # because of a GET request!
65 frappe.db.commit()
66
67 def authenticate_ldap_user(user=None, password=None):
68 dn = None
69 params = {}
70 settings = get_ldap_settings()
71
72 try:
73 import ldap
74 except:
75 msg = """
76 <div>
77 {{_("Seems ldap is not installed on system.")}}<br>
78 <a href"https://discuss.erpnext.com/t/frappe-v-7-1-beta-ldap-dependancies/15841">{{_("Click here")}}</a>,
79 {{_("Guidelines to install ldap dependancies and python")}}
80 </div>
81 """
82 frappe.throw(msg, title=_("LDAP Not Installed"))
83
84 conn = ldap.initialize(settings.ldap_server_url)
85
86 try:
87 try:
88 # set TLS settings for secure connection
89 if self.ssl_tls_mode == 'StartTLS':
90 conn.set_option(ldap.OPT_X_TLS_DEMAND, True)
91 if self.require_trusted_certificate == 'Yes':
92 conn.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_DEMAND)
93 conn.start_tls_s()
94 except:
95 frappe.throw(_("StartTLS is not supported"))
96
97 # simple_bind_s is synchronous binding to server, it takes two param DN and password
98 conn.simple_bind_s(settings.base_dn, settings.get_password(raise_exception=False))
99
100 #search for surnames beginning with a
101 #available options for how deep a search you want.
102 #LDAP_SCOPE_BASE, LDAP_SCOPE_ONELEVEL,LDAP_SCOPE_SUBTREE,
103 result = conn.search_s(settings.organizational_unit, ldap.SCOPE_SUBTREE,
104 settings.ldap_search_string.format(user))
105
106 for dn, r in result:
107 dn = cstr(dn)
108 params["email"] = cstr(r[settings.ldap_email_field][0])
109 params["username"] = cstr(r[settings.ldap_username_field][0])
110 params["first_name"] = cstr(r[settings.ldap_first_name_field][0])
111
112 if dn:
113 conn.simple_bind_s(dn, frappe.as_unicode(password))
114 return create_user(params)
115 else:
116 frappe.throw(_("Not a valid LDAP user"))
117
118 except ldap.LDAPError:
119 conn.unbind_s()
120 frappe.throw(_("Incorrect UserId or Password"))
121
122 def create_user(params):
123 if frappe.db.exists("User", params["email"]):
124 return frappe.get_doc("User", params["email"])
125
126 else:
127 params.update({
128 "doctype": "User",
129 "send_welcome_email": 0,
130 "language": "",
131 "user_type": "System User",
132 "roles": [{
133 "role": _("Blogger")
134 }]
135 })
136
137 user = frappe.get_doc(params).insert(ignore_permissions=True)
138 frappe.db.commit()
139
140 return user
141
[end of frappe/integrations/doctype/ldap_settings/ldap_settings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/frappe/integrations/doctype/ldap_settings/ldap_settings.py b/frappe/integrations/doctype/ldap_settings/ldap_settings.py
--- a/frappe/integrations/doctype/ldap_settings/ldap_settings.py
+++ b/frappe/integrations/doctype/ldap_settings/ldap_settings.py
@@ -86,7 +86,7 @@
try:
try:
# set TLS settings for secure connection
- if self.ssl_tls_mode == 'StartTLS':
+ if settings.ssl_tls_mode == 'StartTLS':
conn.set_option(ldap.OPT_X_TLS_DEMAND, True)
if self.require_trusted_certificate == 'Yes':
conn.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_DEMAND)
| {"golden_diff": "diff --git a/frappe/integrations/doctype/ldap_settings/ldap_settings.py b/frappe/integrations/doctype/ldap_settings/ldap_settings.py\n--- a/frappe/integrations/doctype/ldap_settings/ldap_settings.py\n+++ b/frappe/integrations/doctype/ldap_settings/ldap_settings.py\n@@ -86,7 +86,7 @@\n \ttry:\n \t\ttry:\n \t\t\t# set TLS settings for secure connection\n-\t\t\tif self.ssl_tls_mode == 'StartTLS':\n+\t\t\tif settings.ssl_tls_mode == 'StartTLS':\n \t\t\t\tconn.set_option(ldap.OPT_X_TLS_DEMAND, True)\n \t\t\t\tif self.require_trusted_certificate == 'Yes':\n \t\t\t\t\tconn.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_DEMAND)\n", "issue": "\"StartTLS is not supported\"-Bug in LDAP integrations\n`self` is used in the method `authenticate_ldap_user` which results in the Python exception:\r\n```\r\nNameError: global name 'self' is not defined\r\n```\r\nhttps://github.com/frappe/frappe/blob/6811f0345e35ea521942db4b29425dd71e403ab8/frappe/integrations/doctype/ldap_settings/ldap_settings.py#L89\r\n\r\nSince all exceptions are caught regardless their type, a misleading \"StartTLS is not supported\" error is thrown:\r\nhttps://github.com/frappe/frappe/blob/6811f0345e35ea521942db4b29425dd71e403ab8/frappe/integrations/doctype/ldap_settings/ldap_settings.py#L95\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2015, Frappe Technologies and contributors\n# For license information, please see license.txt\n\nfrom __future__ import unicode_literals\nimport frappe\nfrom frappe import _\nfrom frappe.utils import cstr\nfrom frappe.model.document import Document\n\nclass LDAPSettings(Document):\n\tdef validate(self):\n\t\tif not self.flags.ignore_mandatory:\n\t\t\tself.validate_ldap_credentails()\n\n\tdef validate_ldap_credentails(self):\n\t\ttry:\n\t\t\timport ldap\n\t\t\tconn = ldap.initialize(self.ldap_server_url)\n\t\t\ttry:\n\t\t\t\tif self.ssl_tls_mode == 'StartTLS':\n\t\t\t\t\tconn.set_option(ldap.OPT_X_TLS_DEMAND, True)\n\t\t\t\t\tif self.require_trusted_certificate == 'Yes':\n\t\t\t\t\t\tconn.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_DEMAND)\n\t\t\t\t\tconn.start_tls_s()\n\t\t\texcept:\n\t\t\t\tfrappe.throw(_(\"StartTLS is not supported\"))\n\n\t\t\tconn.simple_bind_s(self.base_dn, self.get_password(raise_exception=False))\n\t\texcept ImportError:\n\t\t\tmsg = \"\"\"\n\t\t\t\t<div>\n\t\t\t\t\t{{_(\"Seems ldap is not installed on system.<br>Guidelines to install ldap dependancies and python package\")}},\n\t\t\t\t\t<a href=\"https://discuss.erpnext.com/t/frappe-v-7-1-beta-ldap-dependancies/15841\" target=\"_blank\">{{_(\"Click here\")}}</a>,\n\t\t\t\t</div>\n\t\t\t\"\"\"\n\t\t\tfrappe.throw(msg, title=_(\"LDAP Not Installed\"))\n\n\t\texcept ldap.LDAPError:\n\t\t\tconn.unbind_s()\n\t\t\tfrappe.throw(_(\"Incorrect UserId or Password\"))\n\ndef get_ldap_settings():\n\ttry:\n\t\tsettings = frappe.get_doc(\"LDAP Settings\")\n\n\t\tsettings.update({\n\t\t\t\"method\": \"frappe.integrations.doctype.ldap_settings.ldap_settings.login\"\n\t\t})\n\t\treturn settings\n\texcept Exception:\n\t\t# this will return blank settings\n\t\treturn frappe._dict()\n\[email protected](allow_guest=True)\ndef login():\n\t#### LDAP LOGIN LOGIC #####\n\targs = frappe.form_dict\n\tuser = authenticate_ldap_user(frappe.as_unicode(args.usr), frappe.as_unicode(args.pwd))\n\n\tfrappe.local.login_manager.user = user.name\n\tfrappe.local.login_manager.post_login()\n\n\t# because of a GET request!\n\tfrappe.db.commit()\n\ndef authenticate_ldap_user(user=None, 
password=None):\n\tdn = None\n\tparams = {}\n\tsettings = get_ldap_settings()\n\n\ttry:\n\t\timport ldap\n\texcept:\n\t\tmsg = \"\"\"\n\t\t\t<div>\n\t\t\t\t{{_(\"Seems ldap is not installed on system.\")}}<br>\n\t\t\t\t<a href\"https://discuss.erpnext.com/t/frappe-v-7-1-beta-ldap-dependancies/15841\">{{_(\"Click here\")}}</a>,\n\t\t\t\t\t{{_(\"Guidelines to install ldap dependancies and python\")}}\n\t\t\t</div>\n\t\t\"\"\"\n\t\tfrappe.throw(msg, title=_(\"LDAP Not Installed\"))\n\n\tconn = ldap.initialize(settings.ldap_server_url)\n\n\ttry:\n\t\ttry:\n\t\t\t# set TLS settings for secure connection\n\t\t\tif self.ssl_tls_mode == 'StartTLS':\n\t\t\t\tconn.set_option(ldap.OPT_X_TLS_DEMAND, True)\n\t\t\t\tif self.require_trusted_certificate == 'Yes':\n\t\t\t\t\tconn.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_DEMAND)\n\t\t\t\tconn.start_tls_s()\n\t\texcept:\n\t\t\tfrappe.throw(_(\"StartTLS is not supported\"))\n\n\t\t# simple_bind_s is synchronous binding to server, it takes two param DN and password\n\t\tconn.simple_bind_s(settings.base_dn, settings.get_password(raise_exception=False))\n\n\t\t#search for surnames beginning with a\n\t\t#available options for how deep a search you want.\n\t\t#LDAP_SCOPE_BASE, LDAP_SCOPE_ONELEVEL,LDAP_SCOPE_SUBTREE,\n\t\tresult = conn.search_s(settings.organizational_unit, ldap.SCOPE_SUBTREE,\n\t\t\tsettings.ldap_search_string.format(user))\n\n\t\tfor dn, r in result:\n\t\t\tdn = cstr(dn)\n\t\t\tparams[\"email\"] = cstr(r[settings.ldap_email_field][0])\n\t\t\tparams[\"username\"] = cstr(r[settings.ldap_username_field][0])\n\t\t\tparams[\"first_name\"] = cstr(r[settings.ldap_first_name_field][0])\n\n\t\tif dn:\n\t\t\tconn.simple_bind_s(dn, frappe.as_unicode(password))\n\t\t\treturn create_user(params)\n\t\telse:\n\t\t\tfrappe.throw(_(\"Not a valid LDAP user\"))\n\n\texcept ldap.LDAPError:\n\t\tconn.unbind_s()\n\t\tfrappe.throw(_(\"Incorrect UserId or Password\"))\n\ndef create_user(params):\n\tif frappe.db.exists(\"User\", params[\"email\"]):\n\t\treturn frappe.get_doc(\"User\", params[\"email\"])\n\n\telse:\n\t\tparams.update({\n\t\t\t\"doctype\": \"User\",\n\t\t\t\"send_welcome_email\": 0,\n\t\t\t\"language\": \"\",\n\t\t\t\"user_type\": \"System User\",\n\t\t\t\"roles\": [{\n\t\t\t\t\"role\": _(\"Blogger\")\n\t\t\t}]\n\t\t})\n\n\t\tuser = frappe.get_doc(params).insert(ignore_permissions=True)\n\t\tfrappe.db.commit()\n\n\t\treturn user\n", "path": "frappe/integrations/doctype/ldap_settings/ldap_settings.py"}]} | 2,214 | 167 |
gh_patches_debug_36352 | rasdani/github-patches | git_diff | pypa__virtualenv-1579 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Long env path names cause weird errors after activating the virtual environment
After creating a virtualenv in the path
`/home/harald/git-repos/sandbox/stackoverflow/41454203-how-do-you-import-the-library-being-tested-when-running-a-nosetest/env`
I could not run the env's pip,
```
nosetest/env$ ./bin/pip
bash: ./bin/pip: /home/harald/git-repos/sandbox/stackoverflow/41454203-how-do-you-import-the-l: bad interpreter: Permission denied
```
and running just pip gives me the system's pip, with all of its packages.
</issue>
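The truncated interpreter path in the `bad interpreter` message is the classic symptom of an over-long shebang line: Linux reads only a fixed-size buffer from the first line of a script (128 bytes, `#!` included, on older kernels), so a console script pointing at this environment's Python gets cut off. A quick length check (illustrative sketch only; the exact limit varies by kernel version):

```python
prefix = ("/home/harald/git-repos/sandbox/stackoverflow/"
          "41454203-how-do-you-import-the-library-being-tested-when-running-a-nosetest/env")
shebang = "#!" + prefix + "/bin/python"
print(len(shebang))   # 137 -- past the traditional 128-byte shebang buffer
```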
<code>
[start of src/virtualenv/seed/via_app_data/pip_install/base.py]
1 from __future__ import absolute_import, unicode_literals
2
3 import logging
4 import os
5 import re
6 import shutil
7 import zipfile
8 from abc import ABCMeta, abstractmethod
9 from tempfile import mkdtemp
10 from textwrap import dedent
11
12 import six
13 from six import PY3
14
15 from virtualenv.info import IS_WIN
16 from virtualenv.util import ConfigParser
17 from virtualenv.util.path import Path, make_exe
18
19
20 @six.add_metaclass(ABCMeta)
21 class PipInstall(object):
22 def __init__(self, wheel, creator, image_folder):
23 self._wheel = wheel
24 self._creator = creator
25 self._image_dir = image_folder
26 self._extracted = False
27 self.__dist_info = None
28 self._console_entry_points = None
29
30 @abstractmethod
31 def _sync(self, src, dst):
32 raise NotImplementedError
33
34 def install(self):
35 self._extracted = True
36 # sync image
37 for filename in self._image_dir.iterdir():
38 into = self._creator.purelib / filename.name
39 if into.exists():
40 if into.is_dir() and not into.is_symlink():
41 shutil.rmtree(str(into))
42 else:
43 into.unlink()
44 self._sync(filename, into)
45 # generate console executables
46 consoles = set()
47 script_dir = self._creator.script_dir
48 for name, module in self._console_scripts.items():
49 consoles.update(self._create_console_entry_point(name, module, script_dir))
50 logging.debug("generated console scripts %s", " ".join(i.name for i in consoles))
51
52 def build_image(self):
53 # 1. first extract the wheel
54 logging.debug("build install image to %s of %s", self._image_dir, self._wheel.name)
55 with zipfile.ZipFile(str(self._wheel)) as zip_ref:
56 zip_ref.extractall(str(self._image_dir))
57 self._extracted = True
58 # 2. now add additional files not present in the package
59 new_files = self._generate_new_files()
60 # 3. finally fix the records file
61 self._fix_records(new_files)
62
63 def _records_text(self, files):
64 record_data = "\n".join(
65 "{},,".format(os.path.relpath(six.ensure_text(str(rec)), six.ensure_text(str(self._image_dir))))
66 for rec in files
67 )
68 return record_data
69
70 def _generate_new_files(self):
71 new_files = set()
72 installer = self._dist_info / "INSTALLER"
73 installer.write_text("pip\n")
74 new_files.add(installer)
75 # inject a no-op root element, as workaround for bug added
76 # by https://github.com/pypa/pip/commit/c7ae06c79#r35523722
77 marker = self._image_dir / "{}.virtualenv".format(self._dist_info.name)
78 marker.write_text("")
79 new_files.add(marker)
80 folder = mkdtemp()
81 try:
82 to_folder = Path(folder)
83 rel = os.path.relpath(
84 six.ensure_text(str(self._creator.script_dir)), six.ensure_text(str(self._creator.purelib))
85 )
86 for name, module in self._console_scripts.items():
87 new_files.update(
88 Path(os.path.normpath(six.ensure_text(str(self._image_dir / rel / i.name))))
89 for i in self._create_console_entry_point(name, module, to_folder)
90 )
91 finally:
92 shutil.rmtree(folder, ignore_errors=True)
93 return new_files
94
95 @property
96 def _dist_info(self):
97 if self._extracted is False:
98 return None # pragma: no cover
99 if self.__dist_info is None:
100 for filename in self._image_dir.iterdir():
101 if filename.suffix == ".dist-info":
102 self.__dist_info = filename
103 break
104 else:
105 raise RuntimeError("no dist info") # pragma: no cover
106 return self.__dist_info
107
108 @abstractmethod
109 def _fix_records(self, extra_record_data):
110 raise NotImplementedError
111
112 @property
113 def _console_scripts(self):
114 if self._extracted is False:
115 return None # pragma: no cover
116 if self._console_entry_points is None:
117 self._console_entry_points = {}
118 entry_points = self._dist_info / "entry_points.txt"
119 if entry_points.exists():
120 parser = ConfigParser.ConfigParser()
121 with entry_points.open() as file_handler:
122 reader = getattr(parser, "read_file" if PY3 else "readfp")
123 reader(file_handler)
124 if "console_scripts" in parser.sections():
125 for name, value in parser.items("console_scripts"):
126 match = re.match(r"(.*?)-?\d\.?\d*", name)
127 if match:
128 name = match.groups(1)[0]
129 self._console_entry_points[name] = value
130 return self._console_entry_points
131
132 def _create_console_entry_point(self, name, value, to_folder):
133 result = []
134 if IS_WIN:
135 # windows doesn't support simple script files, so fallback to more complicated exe generator
136 from distlib.scripts import ScriptMaker
137
138 maker = ScriptMaker(None, str(to_folder))
139 maker.clobber = True # overwrite
140 maker.variants = {"", "X", "X.Y"} # create all variants
141 maker.set_mode = True # ensure they are executable
142 maker.executable = str(self._creator.exe)
143 specification = "{} = {}".format(name, value)
144 new_files = maker.make(specification)
145 result.extend(Path(i) for i in new_files)
146 else:
147 module, func = value.split(":")
148 content = (
149 dedent(
150 """
151 #!{0}
152 # -*- coding: utf-8 -*-
153 import re
154 import sys
155
156 from {1} import {2}
157
158 if __name__ == "__main__":
159 sys.argv[0] = re.sub(r"(-script.pyw?|.exe)?$", "", sys.argv[0])
160 sys.exit({2}())
161 """
162 )
163 .lstrip()
164 .format(self._creator.exe, module, func)
165 )
166
167 version = self._creator.interpreter.version_info
168 for new_name in (
169 name,
170 "{}{}".format(name, version.major),
171 "{}-{}.{}".format(name, version.major, version.minor),
172 ):
173 exe = to_folder / new_name
174 exe.write_text(content, encoding="utf-8")
175 make_exe(exe)
176 result.append(exe)
177 return result
178
179 def clear(self):
180 if self._image_dir.exists():
181 shutil.rmtree(six.ensure_text(str(self._image_dir)))
182
183 def has_image(self):
184 return self._image_dir.exists() and next(self._image_dir.iterdir()) is not None
185
[end of src/virtualenv/seed/via_app_data/pip_install/base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/virtualenv/seed/via_app_data/pip_install/base.py b/src/virtualenv/seed/via_app_data/pip_install/base.py
--- a/src/virtualenv/seed/via_app_data/pip_install/base.py
+++ b/src/virtualenv/seed/via_app_data/pip_install/base.py
@@ -7,14 +7,12 @@
import zipfile
from abc import ABCMeta, abstractmethod
from tempfile import mkdtemp
-from textwrap import dedent
import six
from six import PY3
-from virtualenv.info import IS_WIN
from virtualenv.util import ConfigParser
-from virtualenv.util.path import Path, make_exe
+from virtualenv.util.path import Path
@six.add_metaclass(ABCMeta)
@@ -131,49 +129,16 @@
def _create_console_entry_point(self, name, value, to_folder):
result = []
- if IS_WIN:
- # windows doesn't support simple script files, so fallback to more complicated exe generator
- from distlib.scripts import ScriptMaker
-
- maker = ScriptMaker(None, str(to_folder))
- maker.clobber = True # overwrite
- maker.variants = {"", "X", "X.Y"} # create all variants
- maker.set_mode = True # ensure they are executable
- maker.executable = str(self._creator.exe)
- specification = "{} = {}".format(name, value)
- new_files = maker.make(specification)
- result.extend(Path(i) for i in new_files)
- else:
- module, func = value.split(":")
- content = (
- dedent(
- """
- #!{0}
- # -*- coding: utf-8 -*-
- import re
- import sys
-
- from {1} import {2}
-
- if __name__ == "__main__":
- sys.argv[0] = re.sub(r"(-script.pyw?|.exe)?$", "", sys.argv[0])
- sys.exit({2}())
- """
- )
- .lstrip()
- .format(self._creator.exe, module, func)
- )
-
- version = self._creator.interpreter.version_info
- for new_name in (
- name,
- "{}{}".format(name, version.major),
- "{}-{}.{}".format(name, version.major, version.minor),
- ):
- exe = to_folder / new_name
- exe.write_text(content, encoding="utf-8")
- make_exe(exe)
- result.append(exe)
+ from distlib.scripts import ScriptMaker
+
+ maker = ScriptMaker(None, str(to_folder))
+ maker.clobber = True # overwrite
+ maker.variants = {"", "X", "X.Y"} # create all variants
+ maker.set_mode = True # ensure they are executable
+ maker.executable = str(self._creator.exe)
+ specification = "{} = {}".format(name, value)
+ new_files = maker.make(specification)
+ result.extend(Path(i) for i in new_files)
return result
def clear(self):
| {"golden_diff": "diff --git a/src/virtualenv/seed/via_app_data/pip_install/base.py b/src/virtualenv/seed/via_app_data/pip_install/base.py\n--- a/src/virtualenv/seed/via_app_data/pip_install/base.py\n+++ b/src/virtualenv/seed/via_app_data/pip_install/base.py\n@@ -7,14 +7,12 @@\n import zipfile\n from abc import ABCMeta, abstractmethod\n from tempfile import mkdtemp\n-from textwrap import dedent\n \n import six\n from six import PY3\n \n-from virtualenv.info import IS_WIN\n from virtualenv.util import ConfigParser\n-from virtualenv.util.path import Path, make_exe\n+from virtualenv.util.path import Path\n \n \n @six.add_metaclass(ABCMeta)\n@@ -131,49 +129,16 @@\n \n def _create_console_entry_point(self, name, value, to_folder):\n result = []\n- if IS_WIN:\n- # windows doesn't support simple script files, so fallback to more complicated exe generator\n- from distlib.scripts import ScriptMaker\n-\n- maker = ScriptMaker(None, str(to_folder))\n- maker.clobber = True # overwrite\n- maker.variants = {\"\", \"X\", \"X.Y\"} # create all variants\n- maker.set_mode = True # ensure they are executable\n- maker.executable = str(self._creator.exe)\n- specification = \"{} = {}\".format(name, value)\n- new_files = maker.make(specification)\n- result.extend(Path(i) for i in new_files)\n- else:\n- module, func = value.split(\":\")\n- content = (\n- dedent(\n- \"\"\"\n- #!{0}\n- # -*- coding: utf-8 -*-\n- import re\n- import sys\n-\n- from {1} import {2}\n-\n- if __name__ == \"__main__\":\n- sys.argv[0] = re.sub(r\"(-script.pyw?|.exe)?$\", \"\", sys.argv[0])\n- sys.exit({2}())\n- \"\"\"\n- )\n- .lstrip()\n- .format(self._creator.exe, module, func)\n- )\n-\n- version = self._creator.interpreter.version_info\n- for new_name in (\n- name,\n- \"{}{}\".format(name, version.major),\n- \"{}-{}.{}\".format(name, version.major, version.minor),\n- ):\n- exe = to_folder / new_name\n- exe.write_text(content, encoding=\"utf-8\")\n- make_exe(exe)\n- result.append(exe)\n+ from distlib.scripts import ScriptMaker\n+\n+ maker = ScriptMaker(None, str(to_folder))\n+ maker.clobber = True # overwrite\n+ maker.variants = {\"\", \"X\", \"X.Y\"} # create all variants\n+ maker.set_mode = True # ensure they are executable\n+ maker.executable = str(self._creator.exe)\n+ specification = \"{} = {}\".format(name, value)\n+ new_files = maker.make(specification)\n+ result.extend(Path(i) for i in new_files)\n return result\n \n def clear(self):\n", "issue": "Long env path names cause weird errors after activiting virtual environment\nAfter creating a virtualenv in the path\r\n\r\n`/home/harald/git-repos/sandbox/stackoverflow/41454203-how-do-you-import-the-library-being-tested-when-running-a-nosetest/env`\r\n\r\nI could not run the env's pip,\r\n\r\n```\r\nnosetest/env$ ./bin/pip\r\nbash: ./bin/pip: /home/harald/git-repos/sandbox/stackoverflow/41454203-how-do-you-import-the-l: bad interpreter: Permission denied\r\n```\r\n\r\nand running just pip gives me the system's pip, with all of its packages.\n", "before_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nimport logging\nimport os\nimport re\nimport shutil\nimport zipfile\nfrom abc import ABCMeta, abstractmethod\nfrom tempfile import mkdtemp\nfrom textwrap import dedent\n\nimport six\nfrom six import PY3\n\nfrom virtualenv.info import IS_WIN\nfrom virtualenv.util import ConfigParser\nfrom virtualenv.util.path import Path, make_exe\n\n\[email protected]_metaclass(ABCMeta)\nclass PipInstall(object):\n def __init__(self, wheel, creator, image_folder):\n 
self._wheel = wheel\n self._creator = creator\n self._image_dir = image_folder\n self._extracted = False\n self.__dist_info = None\n self._console_entry_points = None\n\n @abstractmethod\n def _sync(self, src, dst):\n raise NotImplementedError\n\n def install(self):\n self._extracted = True\n # sync image\n for filename in self._image_dir.iterdir():\n into = self._creator.purelib / filename.name\n if into.exists():\n if into.is_dir() and not into.is_symlink():\n shutil.rmtree(str(into))\n else:\n into.unlink()\n self._sync(filename, into)\n # generate console executables\n consoles = set()\n script_dir = self._creator.script_dir\n for name, module in self._console_scripts.items():\n consoles.update(self._create_console_entry_point(name, module, script_dir))\n logging.debug(\"generated console scripts %s\", \" \".join(i.name for i in consoles))\n\n def build_image(self):\n # 1. first extract the wheel\n logging.debug(\"build install image to %s of %s\", self._image_dir, self._wheel.name)\n with zipfile.ZipFile(str(self._wheel)) as zip_ref:\n zip_ref.extractall(str(self._image_dir))\n self._extracted = True\n # 2. now add additional files not present in the package\n new_files = self._generate_new_files()\n # 3. finally fix the records file\n self._fix_records(new_files)\n\n def _records_text(self, files):\n record_data = \"\\n\".join(\n \"{},,\".format(os.path.relpath(six.ensure_text(str(rec)), six.ensure_text(str(self._image_dir))))\n for rec in files\n )\n return record_data\n\n def _generate_new_files(self):\n new_files = set()\n installer = self._dist_info / \"INSTALLER\"\n installer.write_text(\"pip\\n\")\n new_files.add(installer)\n # inject a no-op root element, as workaround for bug added\n # by https://github.com/pypa/pip/commit/c7ae06c79#r35523722\n marker = self._image_dir / \"{}.virtualenv\".format(self._dist_info.name)\n marker.write_text(\"\")\n new_files.add(marker)\n folder = mkdtemp()\n try:\n to_folder = Path(folder)\n rel = os.path.relpath(\n six.ensure_text(str(self._creator.script_dir)), six.ensure_text(str(self._creator.purelib))\n )\n for name, module in self._console_scripts.items():\n new_files.update(\n Path(os.path.normpath(six.ensure_text(str(self._image_dir / rel / i.name))))\n for i in self._create_console_entry_point(name, module, to_folder)\n )\n finally:\n shutil.rmtree(folder, ignore_errors=True)\n return new_files\n\n @property\n def _dist_info(self):\n if self._extracted is False:\n return None # pragma: no cover\n if self.__dist_info is None:\n for filename in self._image_dir.iterdir():\n if filename.suffix == \".dist-info\":\n self.__dist_info = filename\n break\n else:\n raise RuntimeError(\"no dist info\") # pragma: no cover\n return self.__dist_info\n\n @abstractmethod\n def _fix_records(self, extra_record_data):\n raise NotImplementedError\n\n @property\n def _console_scripts(self):\n if self._extracted is False:\n return None # pragma: no cover\n if self._console_entry_points is None:\n self._console_entry_points = {}\n entry_points = self._dist_info / \"entry_points.txt\"\n if entry_points.exists():\n parser = ConfigParser.ConfigParser()\n with entry_points.open() as file_handler:\n reader = getattr(parser, \"read_file\" if PY3 else \"readfp\")\n reader(file_handler)\n if \"console_scripts\" in parser.sections():\n for name, value in parser.items(\"console_scripts\"):\n match = re.match(r\"(.*?)-?\\d\\.?\\d*\", name)\n if match:\n name = match.groups(1)[0]\n self._console_entry_points[name] = value\n return self._console_entry_points\n\n def 
_create_console_entry_point(self, name, value, to_folder):\n result = []\n if IS_WIN:\n # windows doesn't support simple script files, so fallback to more complicated exe generator\n from distlib.scripts import ScriptMaker\n\n maker = ScriptMaker(None, str(to_folder))\n maker.clobber = True # overwrite\n maker.variants = {\"\", \"X\", \"X.Y\"} # create all variants\n maker.set_mode = True # ensure they are executable\n maker.executable = str(self._creator.exe)\n specification = \"{} = {}\".format(name, value)\n new_files = maker.make(specification)\n result.extend(Path(i) for i in new_files)\n else:\n module, func = value.split(\":\")\n content = (\n dedent(\n \"\"\"\n #!{0}\n # -*- coding: utf-8 -*-\n import re\n import sys\n\n from {1} import {2}\n\n if __name__ == \"__main__\":\n sys.argv[0] = re.sub(r\"(-script.pyw?|.exe)?$\", \"\", sys.argv[0])\n sys.exit({2}())\n \"\"\"\n )\n .lstrip()\n .format(self._creator.exe, module, func)\n )\n\n version = self._creator.interpreter.version_info\n for new_name in (\n name,\n \"{}{}\".format(name, version.major),\n \"{}-{}.{}\".format(name, version.major, version.minor),\n ):\n exe = to_folder / new_name\n exe.write_text(content, encoding=\"utf-8\")\n make_exe(exe)\n result.append(exe)\n return result\n\n def clear(self):\n if self._image_dir.exists():\n shutil.rmtree(six.ensure_text(str(self._image_dir)))\n\n def has_image(self):\n return self._image_dir.exists() and next(self._image_dir.iterdir()) is not None\n", "path": "src/virtualenv/seed/via_app_data/pip_install/base.py"}]} | 2,615 | 700 |
gh_patches_debug_35026 | rasdani/github-patches | git_diff | vyperlang__vyper-1400 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Deployable bytecode attempts to access calldata
Hi There,
I'm trying to understand vyper constructor usage patterns. When compiling the example Vyper [erc20 contract](https://github.com/ethereum/vyper/blob/master/examples/tokens/ERC20.vy), the output (deployed) bytecode looks like this:
```
0x600035...000f3
```
The second opcode `0x35` is `CALLDATALOAD`. Given that in the EVM specification, contract creation calls assume `calldata == []`, how is calldata used in this case?
In Solidity, a comparable example would not include `CALLDATALOAD` in the executable part of deployed bytecode, reserving it instead for the runtime bytecode portion.
Let me know if I'm misunderstanding!
</issue>
<code>
[start of vyper/parser/parser.py]
1 import ast
2 from typing import (
3 List,
4 cast,
5 )
6
7 from vyper.exceptions import (
8 EventDeclarationException,
9 FunctionDeclarationException,
10 ParserException,
11 StructureException,
12 )
13 from vyper.parser.function_definitions import (
14 is_default_func,
15 is_initializer,
16 parse_function,
17 )
18 from vyper.parser.global_context import (
19 GlobalContext,
20 )
21 from vyper.parser.lll_node import (
22 LLLnode,
23 )
24 from vyper.parser.parser_utils import (
25 annotate_and_optimize_ast,
26 )
27 from vyper.parser.pre_parser import (
28 pre_parse,
29 )
30 from vyper.signatures import (
31 sig_utils,
32 )
33 from vyper.signatures.event_signature import (
34 EventSignature,
35 )
36 from vyper.signatures.function_signature import (
37 FunctionSignature,
38 )
39 from vyper.signatures.interface import (
40 check_valid_contract_interface,
41 )
42 from vyper.utils import (
43 LOADED_LIMIT_MAP,
44 )
45
46 if not hasattr(ast, 'AnnAssign'):
47 raise Exception("Requires python 3.6 or higher for annotation support")
48
49
50 # Header code
51 INITIALIZER_LIST = ['seq', ['mstore', 28, ['calldataload', 0]]]
52 # Store limit constants at fixed addresses in memory.
53 INITIALIZER_LIST += [['mstore', pos, limit_size] for pos, limit_size in LOADED_LIMIT_MAP.items()]
54 INITIALIZER_LLL = LLLnode.from_list(INITIALIZER_LIST, typ=None)
55
56
57 def parse_to_ast(source_code: str) -> List[ast.stmt]:
58 """
59 Parses the given vyper source code and returns a list of python AST objects
60 for all statements in the source. Performs pre-processing of source code
61 before parsing as well as post-processing of the resulting AST.
62
63 :param source_code: The vyper source code to be parsed.
64 :return: The post-processed list of python AST objects for each statement in
65 ``source_code``.
66 """
67 class_types, reformatted_code = pre_parse(source_code)
68
69 if '\x00' in reformatted_code:
70 raise ParserException('No null bytes (\\x00) allowed in the source code.')
71
72 # The return type depends on the parse mode which is why we need to cast here
73 parsed_ast = cast(ast.Module, ast.parse(reformatted_code))
74 annotate_and_optimize_ast(parsed_ast, reformatted_code, class_types)
75
76 return parsed_ast.body
77
78
79 def parse_events(sigs, global_ctx):
80 for event in global_ctx._events:
81 sigs[event.target.id] = EventSignature.from_declaration(event, global_ctx)
82 return sigs
83
84
85 def parse_external_contracts(external_contracts, global_ctx):
86 for _contractname in global_ctx._contracts:
87 _contract_defs = global_ctx._contracts[_contractname]
88 _defnames = [_def.name for _def in _contract_defs]
89 contract = {}
90 if len(set(_defnames)) < len(_contract_defs):
91 raise FunctionDeclarationException(
92 "Duplicate function name: %s" % (
93 [name for name in _defnames if _defnames.count(name) > 1][0]
94 )
95 )
96
97 for _def in _contract_defs:
98 constant = False
99 # test for valid call type keyword.
100 if len(_def.body) == 1 and \
101 isinstance(_def.body[0], ast.Expr) and \
102 isinstance(_def.body[0].value, ast.Name) and \
103 _def.body[0].value.id in ('modifying', 'constant'):
104 constant = True if _def.body[0].value.id == 'constant' else False
105 else:
106 raise StructureException('constant or modifying call type must be specified', _def)
107 # Recognizes already-defined structs
108 sig = FunctionSignature.from_definition(
109 _def,
110 contract_def=True,
111 constant=constant,
112 custom_structs=global_ctx._structs,
113 constants=global_ctx._constants
114 )
115 contract[sig.name] = sig
116 external_contracts[_contractname] = contract
117
118 for interface_name, interface in global_ctx._interfaces.items():
119 external_contracts[interface_name] = {
120 sig.name: sig
121 for sig in interface
122 if isinstance(sig, FunctionSignature)
123 }
124
125 return external_contracts
126
127
128 def parse_other_functions(o,
129 otherfuncs,
130 sigs,
131 external_contracts,
132 origcode,
133 global_ctx,
134 default_function,
135 runtime_only):
136 sub = ['seq', INITIALIZER_LLL]
137 add_gas = INITIALIZER_LLL.gas
138 for _def in otherfuncs:
139 sub.append(
140 parse_function(_def, {**{'self': sigs}, **external_contracts}, origcode, global_ctx)
141 )
142 sub[-1].total_gas += add_gas
143 add_gas += 30
144 for sig in sig_utils.generate_default_arg_sigs(_def, external_contracts, global_ctx):
145 sig.gas = sub[-1].total_gas
146 sigs[sig.sig] = sig
147
148 # Add fallback function
149 if default_function:
150 default_func = parse_function(
151 default_function[0],
152 {**{'self': sigs}, **external_contracts},
153 origcode,
154 global_ctx,
155 )
156 sub.append(default_func)
157 else:
158 sub.append(LLLnode.from_list(['revert', 0, 0], typ=None, annotation='Default function'))
159 if runtime_only:
160 return sub
161 else:
162 o.append(['return', 0, ['lll', sub, 0]])
163 return o
164
165
166 # Main python parse tree => LLL method
167 def parse_tree_to_lll(code, origcode, runtime_only=False, interface_codes=None):
168 global_ctx = GlobalContext.get_global_context(code, interface_codes=interface_codes)
169 _names_def = [_def.name for _def in global_ctx._defs]
170 # Checks for duplicate function names
171 if len(set(_names_def)) < len(_names_def):
172 raise FunctionDeclarationException(
173 "Duplicate function name: %s" % (
174 [name for name in _names_def if _names_def.count(name) > 1][0]
175 )
176 )
177 _names_events = [_event.target.id for _event in global_ctx._events]
178 # Checks for duplicate event names
179 if len(set(_names_events)) < len(_names_events):
180 raise EventDeclarationException(
181 "Duplicate event name: %s" % (
182 [name for name in _names_events if _names_events.count(name) > 1][0]
183 )
184 )
185 # Initialization function
186 initfunc = [_def for _def in global_ctx._defs if is_initializer(_def)]
187 # Default function
188 defaultfunc = [_def for _def in global_ctx._defs if is_default_func(_def)]
189 # Regular functions
190 otherfuncs = [
191 _def
192 for _def
193 in global_ctx._defs
194 if not is_initializer(_def) and not is_default_func(_def)
195 ]
196 sigs = {}
197 external_contracts = {}
198 # Create the main statement
199 o = ['seq']
200 if global_ctx._events:
201 sigs = parse_events(sigs, global_ctx)
202 if global_ctx._contracts or global_ctx._interfaces:
203 external_contracts = parse_external_contracts(external_contracts, global_ctx)
204 # If there is an init func...
205 if initfunc:
206 o.append(INITIALIZER_LLL)
207 o.append(
208 parse_function(
209 initfunc[0],
210 {**{'self': sigs}, **external_contracts},
211 origcode,
212 global_ctx,
213 )
214 )
215 # If there are regular functions...
216 if otherfuncs or defaultfunc:
217 o = parse_other_functions(
218 o, otherfuncs, sigs, external_contracts, origcode, global_ctx, defaultfunc, runtime_only
219 )
220
221 # Check if interface of contract is correct.
222 check_valid_contract_interface(global_ctx, sigs)
223
224 return LLLnode.from_list(o, typ=None)
225
226
227 def parse_to_lll(kode, runtime_only=False, interface_codes=None):
228 code = parse_to_ast(kode)
229 return parse_tree_to_lll(code, kode, runtime_only=runtime_only, interface_codes=interface_codes)
230
[end of vyper/parser/parser.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/vyper/parser/parser.py b/vyper/parser/parser.py
--- a/vyper/parser/parser.py
+++ b/vyper/parser/parser.py
@@ -1,5 +1,6 @@
import ast
from typing import (
+ Any,
List,
cast,
)
@@ -46,6 +47,20 @@
if not hasattr(ast, 'AnnAssign'):
raise Exception("Requires python 3.6 or higher for annotation support")
+# Header code
+STORE_CALLDATA: List[Any] = ['seq', ['mstore', 28, ['calldataload', 0]]]
+# Store limit constants at fixed addresses in memory.
+LIMIT_MEMORY_SET: List[Any] = [
+ ['mstore', pos, limit_size]
+ for pos, limit_size in LOADED_LIMIT_MAP.items()
+]
+FUNC_INIT_LLL = LLLnode.from_list(
+ STORE_CALLDATA + LIMIT_MEMORY_SET, typ=None
+)
+INIT_FUNC_INIT_LLL = LLLnode.from_list(
+ ['seq'] + LIMIT_MEMORY_SET, typ=None
+)
+
# Header code
INITIALIZER_LIST = ['seq', ['mstore', 28, ['calldataload', 0]]]
@@ -133,8 +148,9 @@
global_ctx,
default_function,
runtime_only):
- sub = ['seq', INITIALIZER_LLL]
- add_gas = INITIALIZER_LLL.gas
+ sub = ['seq', FUNC_INIT_LLL]
+ add_gas = FUNC_INIT_LLL.gas
+
for _def in otherfuncs:
sub.append(
parse_function(_def, {**{'self': sigs}, **external_contracts}, origcode, global_ctx)
@@ -203,15 +219,14 @@
external_contracts = parse_external_contracts(external_contracts, global_ctx)
# If there is an init func...
if initfunc:
- o.append(INITIALIZER_LLL)
- o.append(
- parse_function(
- initfunc[0],
- {**{'self': sigs}, **external_contracts},
- origcode,
- global_ctx,
- )
- )
+ o.append(INIT_FUNC_INIT_LLL)
+ o.append(parse_function(
+ initfunc[0],
+ {**{'self': sigs}, **external_contracts},
+ origcode,
+ global_ctx,
+ ))
+
# If there are regular functions...
if otherfuncs or defaultfunc:
o = parse_other_functions(
| {"golden_diff": "diff --git a/vyper/parser/parser.py b/vyper/parser/parser.py\n--- a/vyper/parser/parser.py\n+++ b/vyper/parser/parser.py\n@@ -1,5 +1,6 @@\n import ast\n from typing import (\n+ Any,\n List,\n cast,\n )\n@@ -46,6 +47,20 @@\n if not hasattr(ast, 'AnnAssign'):\n raise Exception(\"Requires python 3.6 or higher for annotation support\")\n \n+# Header code\n+STORE_CALLDATA: List[Any] = ['seq', ['mstore', 28, ['calldataload', 0]]]\n+# Store limit constants at fixed addresses in memory.\n+LIMIT_MEMORY_SET: List[Any] = [\n+ ['mstore', pos, limit_size]\n+ for pos, limit_size in LOADED_LIMIT_MAP.items()\n+]\n+FUNC_INIT_LLL = LLLnode.from_list(\n+ STORE_CALLDATA + LIMIT_MEMORY_SET, typ=None\n+)\n+INIT_FUNC_INIT_LLL = LLLnode.from_list(\n+ ['seq'] + LIMIT_MEMORY_SET, typ=None\n+)\n+\n \n # Header code\n INITIALIZER_LIST = ['seq', ['mstore', 28, ['calldataload', 0]]]\n@@ -133,8 +148,9 @@\n global_ctx,\n default_function,\n runtime_only):\n- sub = ['seq', INITIALIZER_LLL]\n- add_gas = INITIALIZER_LLL.gas\n+ sub = ['seq', FUNC_INIT_LLL]\n+ add_gas = FUNC_INIT_LLL.gas\n+\n for _def in otherfuncs:\n sub.append(\n parse_function(_def, {**{'self': sigs}, **external_contracts}, origcode, global_ctx)\n@@ -203,15 +219,14 @@\n external_contracts = parse_external_contracts(external_contracts, global_ctx)\n # If there is an init func...\n if initfunc:\n- o.append(INITIALIZER_LLL)\n- o.append(\n- parse_function(\n- initfunc[0],\n- {**{'self': sigs}, **external_contracts},\n- origcode,\n- global_ctx,\n- )\n- )\n+ o.append(INIT_FUNC_INIT_LLL)\n+ o.append(parse_function(\n+ initfunc[0],\n+ {**{'self': sigs}, **external_contracts},\n+ origcode,\n+ global_ctx,\n+ ))\n+\n # If there are regular functions...\n if otherfuncs or defaultfunc:\n o = parse_other_functions(\n", "issue": "Deployable bytecode attempts to access calldata\nHi There,\r\n\r\nI'm trying to understand vyper constructor usage patterns. When compiling the example Vyper [erc20 contract](https://github.com/ethereum/vyper/blob/master/examples/tokens/ERC20.vy), the output (deployed) bytecode looks like this:\r\n\r\n```\r\n0x600035...000f3\r\n```\r\n\r\nThe second opcode `0x35` is `CALLDATALOAD`. 
Given that in the EVM specification, contract creation calls assume `calldata == []`, how is calldata used in this case?\r\n\r\nIn Solidity, a comparable example would not include `CALLDATALOAD` in the executable part of deployed bytecode, reserving it instead for the runtime bytecode portion.\r\n\r\nLet me know if I'm misunderstanding!\n", "before_files": [{"content": "import ast\nfrom typing import (\n List,\n cast,\n)\n\nfrom vyper.exceptions import (\n EventDeclarationException,\n FunctionDeclarationException,\n ParserException,\n StructureException,\n)\nfrom vyper.parser.function_definitions import (\n is_default_func,\n is_initializer,\n parse_function,\n)\nfrom vyper.parser.global_context import (\n GlobalContext,\n)\nfrom vyper.parser.lll_node import (\n LLLnode,\n)\nfrom vyper.parser.parser_utils import (\n annotate_and_optimize_ast,\n)\nfrom vyper.parser.pre_parser import (\n pre_parse,\n)\nfrom vyper.signatures import (\n sig_utils,\n)\nfrom vyper.signatures.event_signature import (\n EventSignature,\n)\nfrom vyper.signatures.function_signature import (\n FunctionSignature,\n)\nfrom vyper.signatures.interface import (\n check_valid_contract_interface,\n)\nfrom vyper.utils import (\n LOADED_LIMIT_MAP,\n)\n\nif not hasattr(ast, 'AnnAssign'):\n raise Exception(\"Requires python 3.6 or higher for annotation support\")\n\n\n# Header code\nINITIALIZER_LIST = ['seq', ['mstore', 28, ['calldataload', 0]]]\n# Store limit constants at fixed addresses in memory.\nINITIALIZER_LIST += [['mstore', pos, limit_size] for pos, limit_size in LOADED_LIMIT_MAP.items()]\nINITIALIZER_LLL = LLLnode.from_list(INITIALIZER_LIST, typ=None)\n\n\ndef parse_to_ast(source_code: str) -> List[ast.stmt]:\n \"\"\"\n Parses the given vyper source code and returns a list of python AST objects\n for all statements in the source. 
Performs pre-processing of source code\n before parsing as well as post-processing of the resulting AST.\n\n :param source_code: The vyper source code to be parsed.\n :return: The post-processed list of python AST objects for each statement in\n ``source_code``.\n \"\"\"\n class_types, reformatted_code = pre_parse(source_code)\n\n if '\\x00' in reformatted_code:\n raise ParserException('No null bytes (\\\\x00) allowed in the source code.')\n\n # The return type depends on the parse mode which is why we need to cast here\n parsed_ast = cast(ast.Module, ast.parse(reformatted_code))\n annotate_and_optimize_ast(parsed_ast, reformatted_code, class_types)\n\n return parsed_ast.body\n\n\ndef parse_events(sigs, global_ctx):\n for event in global_ctx._events:\n sigs[event.target.id] = EventSignature.from_declaration(event, global_ctx)\n return sigs\n\n\ndef parse_external_contracts(external_contracts, global_ctx):\n for _contractname in global_ctx._contracts:\n _contract_defs = global_ctx._contracts[_contractname]\n _defnames = [_def.name for _def in _contract_defs]\n contract = {}\n if len(set(_defnames)) < len(_contract_defs):\n raise FunctionDeclarationException(\n \"Duplicate function name: %s\" % (\n [name for name in _defnames if _defnames.count(name) > 1][0]\n )\n )\n\n for _def in _contract_defs:\n constant = False\n # test for valid call type keyword.\n if len(_def.body) == 1 and \\\n isinstance(_def.body[0], ast.Expr) and \\\n isinstance(_def.body[0].value, ast.Name) and \\\n _def.body[0].value.id in ('modifying', 'constant'):\n constant = True if _def.body[0].value.id == 'constant' else False\n else:\n raise StructureException('constant or modifying call type must be specified', _def)\n # Recognizes already-defined structs\n sig = FunctionSignature.from_definition(\n _def,\n contract_def=True,\n constant=constant,\n custom_structs=global_ctx._structs,\n constants=global_ctx._constants\n )\n contract[sig.name] = sig\n external_contracts[_contractname] = contract\n\n for interface_name, interface in global_ctx._interfaces.items():\n external_contracts[interface_name] = {\n sig.name: sig\n for sig in interface\n if isinstance(sig, FunctionSignature)\n }\n\n return external_contracts\n\n\ndef parse_other_functions(o,\n otherfuncs,\n sigs,\n external_contracts,\n origcode,\n global_ctx,\n default_function,\n runtime_only):\n sub = ['seq', INITIALIZER_LLL]\n add_gas = INITIALIZER_LLL.gas\n for _def in otherfuncs:\n sub.append(\n parse_function(_def, {**{'self': sigs}, **external_contracts}, origcode, global_ctx)\n )\n sub[-1].total_gas += add_gas\n add_gas += 30\n for sig in sig_utils.generate_default_arg_sigs(_def, external_contracts, global_ctx):\n sig.gas = sub[-1].total_gas\n sigs[sig.sig] = sig\n\n # Add fallback function\n if default_function:\n default_func = parse_function(\n default_function[0],\n {**{'self': sigs}, **external_contracts},\n origcode,\n global_ctx,\n )\n sub.append(default_func)\n else:\n sub.append(LLLnode.from_list(['revert', 0, 0], typ=None, annotation='Default function'))\n if runtime_only:\n return sub\n else:\n o.append(['return', 0, ['lll', sub, 0]])\n return o\n\n\n# Main python parse tree => LLL method\ndef parse_tree_to_lll(code, origcode, runtime_only=False, interface_codes=None):\n global_ctx = GlobalContext.get_global_context(code, interface_codes=interface_codes)\n _names_def = [_def.name for _def in global_ctx._defs]\n # Checks for duplicate function names\n if len(set(_names_def)) < len(_names_def):\n raise FunctionDeclarationException(\n \"Duplicate 
function name: %s\" % (\n [name for name in _names_def if _names_def.count(name) > 1][0]\n )\n )\n _names_events = [_event.target.id for _event in global_ctx._events]\n # Checks for duplicate event names\n if len(set(_names_events)) < len(_names_events):\n raise EventDeclarationException(\n \"Duplicate event name: %s\" % (\n [name for name in _names_events if _names_events.count(name) > 1][0]\n )\n )\n # Initialization function\n initfunc = [_def for _def in global_ctx._defs if is_initializer(_def)]\n # Default function\n defaultfunc = [_def for _def in global_ctx._defs if is_default_func(_def)]\n # Regular functions\n otherfuncs = [\n _def\n for _def\n in global_ctx._defs\n if not is_initializer(_def) and not is_default_func(_def)\n ]\n sigs = {}\n external_contracts = {}\n # Create the main statement\n o = ['seq']\n if global_ctx._events:\n sigs = parse_events(sigs, global_ctx)\n if global_ctx._contracts or global_ctx._interfaces:\n external_contracts = parse_external_contracts(external_contracts, global_ctx)\n # If there is an init func...\n if initfunc:\n o.append(INITIALIZER_LLL)\n o.append(\n parse_function(\n initfunc[0],\n {**{'self': sigs}, **external_contracts},\n origcode,\n global_ctx,\n )\n )\n # If there are regular functions...\n if otherfuncs or defaultfunc:\n o = parse_other_functions(\n o, otherfuncs, sigs, external_contracts, origcode, global_ctx, defaultfunc, runtime_only\n )\n\n # Check if interface of contract is correct.\n check_valid_contract_interface(global_ctx, sigs)\n\n return LLLnode.from_list(o, typ=None)\n\n\ndef parse_to_lll(kode, runtime_only=False, interface_codes=None):\n code = parse_to_ast(kode)\n return parse_tree_to_lll(code, kode, runtime_only=runtime_only, interface_codes=interface_codes)\n", "path": "vyper/parser/parser.py"}]} | 3,062 | 564 |
gh_patches_debug_16700 | rasdani/github-patches | git_diff | Kinto__kinto-251 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
500 instead of 503 when storage backend fails with default bucket
Currently we do not catch a potential backend error when creating the implicit objects. And we use a reference to `bucket` and `collection` in the `finally` close...
```
File "/home/travis/build/Kinto/kinto/kinto/views/buckets.py", line 155, in default_bucket
create_collection(request, bucket_id)
File "/home/travis/build/Kinto/kinto/kinto/views/buckets.py", line 123, in create_collection
already_created[collection_uri] = collection
UnboundLocalError: local variable 'collection' referenced before assignment;
```
</issue>
<code>
[start of kinto/views/buckets.py]
1 from six import text_type
2 from uuid import UUID
3
4 from pyramid import httpexceptions
5 from pyramid.settings import asbool
6 from pyramid.security import NO_PERMISSION_REQUIRED
7 from pyramid.view import view_config
8
9 from cliquet import resource
10 from cliquet.utils import hmac_digest, build_request, reapply_cors
11 from cliquet.storage import exceptions as storage_exceptions
12
13 from kinto.authorization import RouteFactory
14 from kinto.views import NameGenerator
15 from kinto.views.collections import Collection
16
17
18 @resource.register(name='bucket',
19 collection_methods=('GET', 'POST'),
20 collection_path='/buckets',
21 record_path='/buckets/{{id}}')
22 class Bucket(resource.ProtectedResource):
23 permissions = ('read', 'write', 'collection:create', 'group:create')
24
25 def __init__(self, *args, **kwargs):
26 super(Bucket, self).__init__(*args, **kwargs)
27 self.model.id_generator = NameGenerator()
28
29 def get_parent_id(self, request):
30 # Buckets are not isolated by user, unlike Cliquet resources.
31 return ''
32
33 def delete(self):
34 result = super(Bucket, self).delete()
35
36 # Delete groups.
37 storage = self.model.storage
38 parent_id = '/buckets/%s' % self.record_id
39 storage.delete_all(collection_id='group',
40 parent_id=parent_id,
41 with_deleted=False)
42 storage.purge_deleted(collection_id='group',
43 parent_id=parent_id)
44
45 # Delete collections.
46 deleted = storage.delete_all(collection_id='collection',
47 parent_id=parent_id,
48 with_deleted=False)
49 storage.purge_deleted(collection_id='collection',
50 parent_id=parent_id)
51
52 # Delete records.
53 id_field = self.model.id_field
54 for collection in deleted:
55 parent_id = '/buckets/%s/collections/%s' % (self.record_id,
56 collection[id_field])
57 storage.delete_all(collection_id='record',
58 parent_id=parent_id,
59 with_deleted=False)
60 storage.purge_deleted(collection_id='record', parent_id=parent_id)
61
62 return result
63
64
65 def create_bucket(request, bucket_id):
66 """Create a bucket if it doesn't exists."""
67 bucket_put = (request.method.lower() == 'put' and
68 request.path.endswith('buckets/default'))
69 # Do nothing if current request will already create the bucket.
70 if bucket_put:
71 return
72
73 # Do not intent to create multiple times per request (e.g. in batch).
74 already_created = request.bound_data.setdefault('buckets', {})
75 if bucket_id in already_created:
76 return
77
78 # Fake context to instantiate a Bucket resource.
79 context = RouteFactory(request)
80 context.get_permission_object_id = lambda r, i: '/buckets/%s' % bucket_id
81 resource = Bucket(request, context)
82 try:
83 bucket = resource.model.create_record({'id': bucket_id})
84 except storage_exceptions.UnicityError as e:
85 bucket = e.record
86 finally:
87 already_created[bucket_id] = bucket
88
89
90 def create_collection(request, bucket_id):
91 # Do nothing if current request does not involve a collection.
92 subpath = request.matchdict.get('subpath')
93 if not (subpath and subpath.startswith('collections/')):
94 return
95
96 collection_id = subpath.split('/')[1]
97 collection_uri = '/buckets/%s/collections/%s' % (bucket_id, collection_id)
98
99 # Do not intent to create multiple times per request (e.g. in batch).
100 already_created = request.bound_data.setdefault('collections', {})
101 if collection_uri in already_created:
102 return
103
104 # Do nothing if current request will already create the collection.
105 collection_put = (request.method.lower() == 'put' and
106 request.path.endswith(collection_id))
107 if collection_put:
108 return
109
110 # Fake context to instantiate a Collection resource.
111 context = RouteFactory(request)
112 context.get_permission_object_id = lambda r, i: collection_uri
113
114 backup = request.matchdict
115 request.matchdict = dict(bucket_id=bucket_id,
116 id=collection_id,
117 **request.matchdict)
118 resource = Collection(request, context)
119 try:
120 collection = resource.model.create_record({'id': collection_id})
121 except storage_exceptions.UnicityError as e:
122 collection = e.record
123 finally:
124 already_created[collection_uri] = collection
125 request.matchdict = backup
126
127
128 @view_config(route_name='default_bucket', permission=NO_PERMISSION_REQUIRED)
129 @view_config(route_name='default_bucket_collection',
130 permission=NO_PERMISSION_REQUIRED)
131 def default_bucket(request):
132 if request.method.lower() == 'options':
133 path = request.path.replace('default', 'unknown')
134 subrequest = build_request(request, {
135 'method': 'OPTIONS',
136 'path': path
137 })
138 return request.invoke_subrequest(subrequest)
139
140 if getattr(request, 'prefixed_userid', None) is None:
141 # Pass through the forbidden_view_config
142 raise httpexceptions.HTTPForbidden()
143
144 settings = request.registry.settings
145
146 if asbool(settings['readonly']):
147 raise httpexceptions.HTTPMethodNotAllowed()
148
149 hmac_secret = settings['userid_hmac_secret']
150 # Build the user unguessable bucket_id UUID from its user_id
151 digest = hmac_digest(hmac_secret, request.prefixed_userid)
152 bucket_id = text_type(UUID(digest[:32]))
153 path = request.path.replace('/buckets/default', '/buckets/%s' % bucket_id)
154 querystring = request.url[(request.url.index(request.path) +
155 len(request.path)):]
156
157 # Make sure bucket exists
158 create_bucket(request, bucket_id)
159
160 # Make sure the collection exists
161 create_collection(request, bucket_id)
162
163 subrequest = build_request(request, {
164 'method': request.method,
165 'path': path + querystring,
166 'body': request.body
167 })
168 subrequest.bound_data = request.bound_data
169
170 try:
171 response = request.invoke_subrequest(subrequest)
172 except httpexceptions.HTTPException as error:
173 if error.content_type == 'application/json':
174 response = reapply_cors(subrequest, error)
175 else:
176 # Ask the upper level to format the error.
177 raise error
178 return response
179
[end of kinto/views/buckets.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kinto/views/buckets.py b/kinto/views/buckets.py
--- a/kinto/views/buckets.py
+++ b/kinto/views/buckets.py
@@ -83,8 +83,7 @@
bucket = resource.model.create_record({'id': bucket_id})
except storage_exceptions.UnicityError as e:
bucket = e.record
- finally:
- already_created[bucket_id] = bucket
+ already_created[bucket_id] = bucket
def create_collection(request, bucket_id):
@@ -120,8 +119,7 @@
collection = resource.model.create_record({'id': collection_id})
except storage_exceptions.UnicityError as e:
collection = e.record
- finally:
- already_created[collection_uri] = collection
+ already_created[collection_uri] = collection
request.matchdict = backup
| {"golden_diff": "diff --git a/kinto/views/buckets.py b/kinto/views/buckets.py\n--- a/kinto/views/buckets.py\n+++ b/kinto/views/buckets.py\n@@ -83,8 +83,7 @@\n bucket = resource.model.create_record({'id': bucket_id})\n except storage_exceptions.UnicityError as e:\n bucket = e.record\n- finally:\n- already_created[bucket_id] = bucket\n+ already_created[bucket_id] = bucket\n \n \n def create_collection(request, bucket_id):\n@@ -120,8 +119,7 @@\n collection = resource.model.create_record({'id': collection_id})\n except storage_exceptions.UnicityError as e:\n collection = e.record\n- finally:\n- already_created[collection_uri] = collection\n+ already_created[collection_uri] = collection\n request.matchdict = backup\n", "issue": "500 instead of 503 when storage backend fails with default bucket\nCurrently we do not catch a potential backend error when creating the implicit objects. And we use a reference to `bucket` and `collection` in the `finally` close...\n\n```\n File \"/home/travis/build/Kinto/kinto/kinto/views/buckets.py\", line 155, in default_bucket\n create_collection(request, bucket_id)\n File \"/home/travis/build/Kinto/kinto/kinto/views/buckets.py\", line 123, in create_collection\n already_created[collection_uri] = collection\nUnboundLocalError: local variable 'collection' referenced before assignment;\n```\n\n", "before_files": [{"content": "from six import text_type\nfrom uuid import UUID\n\nfrom pyramid import httpexceptions\nfrom pyramid.settings import asbool\nfrom pyramid.security import NO_PERMISSION_REQUIRED\nfrom pyramid.view import view_config\n\nfrom cliquet import resource\nfrom cliquet.utils import hmac_digest, build_request, reapply_cors\nfrom cliquet.storage import exceptions as storage_exceptions\n\nfrom kinto.authorization import RouteFactory\nfrom kinto.views import NameGenerator\nfrom kinto.views.collections import Collection\n\n\[email protected](name='bucket',\n collection_methods=('GET', 'POST'),\n collection_path='/buckets',\n record_path='/buckets/{{id}}')\nclass Bucket(resource.ProtectedResource):\n permissions = ('read', 'write', 'collection:create', 'group:create')\n\n def __init__(self, *args, **kwargs):\n super(Bucket, self).__init__(*args, **kwargs)\n self.model.id_generator = NameGenerator()\n\n def get_parent_id(self, request):\n # Buckets are not isolated by user, unlike Cliquet resources.\n return ''\n\n def delete(self):\n result = super(Bucket, self).delete()\n\n # Delete groups.\n storage = self.model.storage\n parent_id = '/buckets/%s' % self.record_id\n storage.delete_all(collection_id='group',\n parent_id=parent_id,\n with_deleted=False)\n storage.purge_deleted(collection_id='group',\n parent_id=parent_id)\n\n # Delete collections.\n deleted = storage.delete_all(collection_id='collection',\n parent_id=parent_id,\n with_deleted=False)\n storage.purge_deleted(collection_id='collection',\n parent_id=parent_id)\n\n # Delete records.\n id_field = self.model.id_field\n for collection in deleted:\n parent_id = '/buckets/%s/collections/%s' % (self.record_id,\n collection[id_field])\n storage.delete_all(collection_id='record',\n parent_id=parent_id,\n with_deleted=False)\n storage.purge_deleted(collection_id='record', parent_id=parent_id)\n\n return result\n\n\ndef create_bucket(request, bucket_id):\n \"\"\"Create a bucket if it doesn't exists.\"\"\"\n bucket_put = (request.method.lower() == 'put' and\n request.path.endswith('buckets/default'))\n # Do nothing if current request will already create the bucket.\n if bucket_put:\n return\n\n # Do not 
intent to create multiple times per request (e.g. in batch).\n already_created = request.bound_data.setdefault('buckets', {})\n if bucket_id in already_created:\n return\n\n # Fake context to instantiate a Bucket resource.\n context = RouteFactory(request)\n context.get_permission_object_id = lambda r, i: '/buckets/%s' % bucket_id\n resource = Bucket(request, context)\n try:\n bucket = resource.model.create_record({'id': bucket_id})\n except storage_exceptions.UnicityError as e:\n bucket = e.record\n finally:\n already_created[bucket_id] = bucket\n\n\ndef create_collection(request, bucket_id):\n # Do nothing if current request does not involve a collection.\n subpath = request.matchdict.get('subpath')\n if not (subpath and subpath.startswith('collections/')):\n return\n\n collection_id = subpath.split('/')[1]\n collection_uri = '/buckets/%s/collections/%s' % (bucket_id, collection_id)\n\n # Do not intent to create multiple times per request (e.g. in batch).\n already_created = request.bound_data.setdefault('collections', {})\n if collection_uri in already_created:\n return\n\n # Do nothing if current request will already create the collection.\n collection_put = (request.method.lower() == 'put' and\n request.path.endswith(collection_id))\n if collection_put:\n return\n\n # Fake context to instantiate a Collection resource.\n context = RouteFactory(request)\n context.get_permission_object_id = lambda r, i: collection_uri\n\n backup = request.matchdict\n request.matchdict = dict(bucket_id=bucket_id,\n id=collection_id,\n **request.matchdict)\n resource = Collection(request, context)\n try:\n collection = resource.model.create_record({'id': collection_id})\n except storage_exceptions.UnicityError as e:\n collection = e.record\n finally:\n already_created[collection_uri] = collection\n request.matchdict = backup\n\n\n@view_config(route_name='default_bucket', permission=NO_PERMISSION_REQUIRED)\n@view_config(route_name='default_bucket_collection',\n permission=NO_PERMISSION_REQUIRED)\ndef default_bucket(request):\n if request.method.lower() == 'options':\n path = request.path.replace('default', 'unknown')\n subrequest = build_request(request, {\n 'method': 'OPTIONS',\n 'path': path\n })\n return request.invoke_subrequest(subrequest)\n\n if getattr(request, 'prefixed_userid', None) is None:\n # Pass through the forbidden_view_config\n raise httpexceptions.HTTPForbidden()\n\n settings = request.registry.settings\n\n if asbool(settings['readonly']):\n raise httpexceptions.HTTPMethodNotAllowed()\n\n hmac_secret = settings['userid_hmac_secret']\n # Build the user unguessable bucket_id UUID from its user_id\n digest = hmac_digest(hmac_secret, request.prefixed_userid)\n bucket_id = text_type(UUID(digest[:32]))\n path = request.path.replace('/buckets/default', '/buckets/%s' % bucket_id)\n querystring = request.url[(request.url.index(request.path) +\n len(request.path)):]\n\n # Make sure bucket exists\n create_bucket(request, bucket_id)\n\n # Make sure the collection exists\n create_collection(request, bucket_id)\n\n subrequest = build_request(request, {\n 'method': request.method,\n 'path': path + querystring,\n 'body': request.body\n })\n subrequest.bound_data = request.bound_data\n\n try:\n response = request.invoke_subrequest(subrequest)\n except httpexceptions.HTTPException as error:\n if error.content_type == 'application/json':\n response = reapply_cors(subrequest, error)\n else:\n # Ask the upper level to format the error.\n raise error\n return response\n", "path": "kinto/views/buckets.py"}]} | 
2,433 | 188 |
gh_patches_debug_9901 | rasdani/github-patches | git_diff | bridgecrewio__checkov-1062 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CKV_AZURE_12: Retention policy of 0 is not supported
**Describe the bug**
When setting the `retention_period.days` value on `azurerm_network_watcher_flow_log` to `0`, `CKV_AZURE_12` still fails. `0` is the value to signify indefinite/forever retention.
**To Reproduce**
Steps to reproduce the behavior:
1. Create an `azurerm_network_watcher_flow_log` resource
2. Set
```
retention_policy {
enabled = true
days = 0
}
```
3. Run `checkov`
4. Receive failure for `CKV_AZURE_12`
**Expected behavior**
`0` is an accepted value as documented at https://docs.microsoft.com/en-us/azure/network-watcher/network-watcher-nsg-flow-logging-overview#how-logging-works
**Desktop (please complete the following information):**
- OS: Linux
- Checkov Version: 2.0.26
**Additional context**
At https://github.com/bridgecrewio/checkov/blob/master/checkov/terraform/checks/resource/azure/NetworkWatcherFlowLogPeriod.py#L17 likely need to adjust the innermost `if` statement to something like:
```python
if retention_in_days is not None and (retention_days == 0 or retention_days >= 90):
```
Happy to open a pull request if this is an acceptable solution.
</issue>
<code>
[start of checkov/terraform/checks/resource/azure/NetworkWatcherFlowLogPeriod.py]
1 from checkov.common.models.enums import CheckResult, CheckCategories
2 from checkov.common.util.type_forcers import force_int
3 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceCheck
4
5
6 class NetworkWatcherFlowLogPeriod(BaseResourceCheck):
7 def __init__(self):
8 name = "Ensure that Network Security Group Flow Log retention period is 'greater than 90 days'"
9 id = "CKV_AZURE_12"
10 supported_resources = ['azurerm_network_watcher_flow_log']
11 categories = [CheckCategories.LOGGING]
12 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
13
14 def scan_resource_conf(self, conf):
15 if 'enabled' in conf and conf['enabled'][0]:
16 retention_block = conf['retention_policy'][0]
17 if retention_block['enabled'][0]:
18 retention_in_days = force_int(retention_block['days'][0])
19 if retention_in_days and retention_in_days >= 90:
20 return CheckResult.PASSED
21 return CheckResult.FAILED
22
23
24 check = NetworkWatcherFlowLogPeriod()
25
[end of checkov/terraform/checks/resource/azure/NetworkWatcherFlowLogPeriod.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checkov/terraform/checks/resource/azure/NetworkWatcherFlowLogPeriod.py b/checkov/terraform/checks/resource/azure/NetworkWatcherFlowLogPeriod.py
--- a/checkov/terraform/checks/resource/azure/NetworkWatcherFlowLogPeriod.py
+++ b/checkov/terraform/checks/resource/azure/NetworkWatcherFlowLogPeriod.py
@@ -16,7 +16,7 @@
retention_block = conf['retention_policy'][0]
if retention_block['enabled'][0]:
retention_in_days = force_int(retention_block['days'][0])
- if retention_in_days and retention_in_days >= 90:
+ if retention_in_days is not None and (retention_in_days == 0 or retention_in_days >= 90):
return CheckResult.PASSED
return CheckResult.FAILED
| {"golden_diff": "diff --git a/checkov/terraform/checks/resource/azure/NetworkWatcherFlowLogPeriod.py b/checkov/terraform/checks/resource/azure/NetworkWatcherFlowLogPeriod.py\n--- a/checkov/terraform/checks/resource/azure/NetworkWatcherFlowLogPeriod.py\n+++ b/checkov/terraform/checks/resource/azure/NetworkWatcherFlowLogPeriod.py\n@@ -16,7 +16,7 @@\n retention_block = conf['retention_policy'][0]\n if retention_block['enabled'][0]:\n retention_in_days = force_int(retention_block['days'][0])\n- if retention_in_days and retention_in_days >= 90:\n+ if retention_in_days is not None and (retention_in_days == 0 or retention_in_days >= 90):\n return CheckResult.PASSED\n return CheckResult.FAILED\n", "issue": "CKV_AZURE_12: Retention policy of 0 is not supported\n**Describe the bug**\r\nWhen setting the `retention_period.days` value on `azurerm_network_watcher_flow_log` to `0`, `CKV_AZURE_12` still fails. `0` is the value to signify indefinite/forever retention.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Create an `azurerm_network_watcher_flow_log` resource\r\n2. Set\r\n```\r\n retention_policy {\r\n enabled = true\r\n days = 0\r\n }\r\n```\r\n3. Run `checkov`\r\n4. Receive failure for `CKV_AZURE_12`\r\n\r\n**Expected behavior**\r\n`0` is an accepted value as documented at https://docs.microsoft.com/en-us/azure/network-watcher/network-watcher-nsg-flow-logging-overview#how-logging-works\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Linux\r\n - Checkov Version: 2.0.26\r\n\r\n\r\n**Additional context**\r\nAt https://github.com/bridgecrewio/checkov/blob/master/checkov/terraform/checks/resource/azure/NetworkWatcherFlowLogPeriod.py#L17 likely need to adjust the innermost `if` statement to something like:\r\n\r\n```python\r\nif retention_in_days is not None and (retention_days == 0 or retention_days >= 90):\r\n```\r\n\r\nHappy to open a pull request if this is an acceptable solution.\n", "before_files": [{"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.common.util.type_forcers import force_int\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceCheck\n\n\nclass NetworkWatcherFlowLogPeriod(BaseResourceCheck):\n def __init__(self):\n name = \"Ensure that Network Security Group Flow Log retention period is 'greater than 90 days'\"\n id = \"CKV_AZURE_12\"\n supported_resources = ['azurerm_network_watcher_flow_log']\n categories = [CheckCategories.LOGGING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n if 'enabled' in conf and conf['enabled'][0]:\n retention_block = conf['retention_policy'][0]\n if retention_block['enabled'][0]:\n retention_in_days = force_int(retention_block['days'][0])\n if retention_in_days and retention_in_days >= 90:\n return CheckResult.PASSED\n return CheckResult.FAILED\n\n\ncheck = NetworkWatcherFlowLogPeriod()\n", "path": "checkov/terraform/checks/resource/azure/NetworkWatcherFlowLogPeriod.py"}]} | 1,154 | 182 |
gh_patches_debug_30688 | rasdani/github-patches | git_diff | joke2k__faker-592 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Reseed code broke in 0.8 unnecessarily
Commit ea4f189bbff1925d7a7e2d7cbc6e91e2e8a9a3f3 changed the name of the `random.Random()` object in `faker.generator` from `random` to `mod_random`, re-exposing the `random` module as the name `faker.generator.random`. This broke code like:
```py
from faker.generator import random
random.seed(1)
```
which is basically what my `pytest-randomly` plugin does. At first I thought this was #586 but then I saw that every run was broken. The failure is silent because `random` is still a name in `faker.generator`, it just now points at the global `random` module 😱
I suggest just doing some renaming in the module to fix this so old code continues to work.
</issue>
<code>
[start of faker/utils/distribution.py]
1 # coding=utf-8
2
3 import bisect
4 from faker.generator import mod_random
5
6 def random_sample(random=None):
7 if random is None:
8 random = mod_random
9 return random.uniform(0.0, 1.0)
10
11
12 def cumsum(it):
13 total = 0
14 for x in it:
15 total += x
16 yield total
17
18
19 def choice_distribution(a, p, random=None):
20 if random is None:
21 random = mod_random
22
23 assert len(a) == len(p)
24
25 if hasattr(random, 'choices'):
26 return random.choices(a, weights=p)[0]
27 else:
28 cdf = list(cumsum(p))
29 normal = cdf[-1]
30 cdf2 = [float(i) / float(normal) for i in cdf]
31 uniform_sample = random_sample(random=random)
32 idx = bisect.bisect_right(cdf2, uniform_sample)
33 return a[idx]
34
[end of faker/utils/distribution.py]
[start of faker/generator.py]
1 # coding=utf-8
2
3 from __future__ import unicode_literals
4
5 import re
6 import random
7
8
9 _re_token = re.compile(r'\{\{(\s?)(\w+)(\s?)\}\}')
10 mod_random = random.Random()
11
12
13 class Generator(object):
14
15 __config = {}
16
17 def __init__(self, **config):
18 self.providers = []
19 self.__config = dict(
20 list(self.__config.items()) + list(config.items()))
21 self.__random = mod_random
22
23 def add_provider(self, provider):
24
25 if type(provider) is type:
26 provider = provider(self)
27
28 self.providers.insert(0, provider)
29
30 for method_name in dir(provider):
31 # skip 'private' method
32 if method_name.startswith('_'):
33 continue
34
35 faker_function = getattr(provider, method_name)
36
37 if hasattr(faker_function, '__call__') or \
38 isinstance(faker_function, (classmethod, staticmethod)):
39 # add all faker method to generator
40 self.set_formatter(method_name, faker_function)
41
42 def provider(self, name):
43 try:
44 lst = [p for p in self.get_providers()
45 if p.__provider__ == name.lower()]
46 return lst[0]
47 except IndexError:
48 return None
49
50 def get_providers(self):
51 """Returns added providers."""
52 return self.providers
53
54 @property
55 def random(self):
56 return self.__random
57
58 def seed_instance(self, seed=None):
59 """Calls random.seed"""
60 if self.__random == mod_random:
61 # create per-instance random obj when first time seed_instance() is called
62 self.__random = random.Random()
63 self.__random.seed(seed)
64
65 @classmethod
66 def seed(cls, seed=None):
67 mod_random.seed(seed)
68
69 def format(self, formatter, *args, **kwargs):
70 """
71 This is a secure way to make a fake from another Provider.
72 """
73 # TODO: data export?
74 return self.get_formatter(formatter)(*args, **kwargs)
75
76 def get_formatter(self, formatter):
77 try:
78 return getattr(self, formatter)
79 except AttributeError:
80 if 'locale' in self.__config:
81 msg = 'Unknown formatter "{0}" with locale "{1}"'.format(
82 formatter, self.__config['locale']
83 )
84 else:
85 raise AttributeError('Unknown formatter "{0}"'.format(
86 formatter
87 ))
88 raise AttributeError(msg)
89
90 def set_formatter(self, name, method):
91 """
92 This method adds a provider method to generator.
93 Override this method to add some decoration or logging stuff.
94 """
95 setattr(self, name, method)
96
97 def parse(self, text):
98 """
99 Replaces tokens (like '{{ tokenName }}' or '{{tokenName}}')
100 with the result from the token method call.
101 """
102 return _re_token.sub(self.__format_token, text)
103
104 def __format_token(self, matches):
105 formatter = list(matches.groups())
106 formatter[1] = self.format(formatter[1])
107 return ''.join(formatter)
108
[end of faker/generator.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/faker/generator.py b/faker/generator.py
--- a/faker/generator.py
+++ b/faker/generator.py
@@ -3,11 +3,12 @@
from __future__ import unicode_literals
import re
-import random
+import random as random_module
_re_token = re.compile(r'\{\{(\s?)(\w+)(\s?)\}\}')
-mod_random = random.Random()
+random = random_module.Random()
+mod_random = random # compat with name released in 0.8
class Generator(object):
@@ -18,7 +19,7 @@
self.providers = []
self.__config = dict(
list(self.__config.items()) + list(config.items()))
- self.__random = mod_random
+ self.__random = random
def add_provider(self, provider):
@@ -57,14 +58,14 @@
def seed_instance(self, seed=None):
"""Calls random.seed"""
- if self.__random == mod_random:
+ if self.__random == random:
# create per-instance random obj when first time seed_instance() is called
- self.__random = random.Random()
+ self.__random = random_module.Random()
self.__random.seed(seed)
@classmethod
def seed(cls, seed=None):
- mod_random.seed(seed)
+ random.seed(seed)
def format(self, formatter, *args, **kwargs):
"""
diff --git a/faker/utils/distribution.py b/faker/utils/distribution.py
--- a/faker/utils/distribution.py
+++ b/faker/utils/distribution.py
@@ -1,7 +1,8 @@
# coding=utf-8
import bisect
-from faker.generator import mod_random
+from faker.generator import random as mod_random
+
def random_sample(random=None):
if random is None:
| {"golden_diff": "diff --git a/faker/generator.py b/faker/generator.py\n--- a/faker/generator.py\n+++ b/faker/generator.py\n@@ -3,11 +3,12 @@\n from __future__ import unicode_literals\n \n import re\n-import random\n+import random as random_module\n \n \n _re_token = re.compile(r'\\{\\{(\\s?)(\\w+)(\\s?)\\}\\}')\n-mod_random = random.Random()\n+random = random_module.Random()\n+mod_random = random # compat with name released in 0.8\n \n \n class Generator(object):\n@@ -18,7 +19,7 @@\n self.providers = []\n self.__config = dict(\n list(self.__config.items()) + list(config.items()))\n- self.__random = mod_random\n+ self.__random = random\n \n def add_provider(self, provider):\n \n@@ -57,14 +58,14 @@\n \n def seed_instance(self, seed=None):\n \"\"\"Calls random.seed\"\"\"\n- if self.__random == mod_random:\n+ if self.__random == random:\n # create per-instance random obj when first time seed_instance() is called\n- self.__random = random.Random()\n+ self.__random = random_module.Random()\n self.__random.seed(seed)\n \n @classmethod\n def seed(cls, seed=None):\n- mod_random.seed(seed)\n+ random.seed(seed)\n \n def format(self, formatter, *args, **kwargs):\n \"\"\"\ndiff --git a/faker/utils/distribution.py b/faker/utils/distribution.py\n--- a/faker/utils/distribution.py\n+++ b/faker/utils/distribution.py\n@@ -1,7 +1,8 @@\n # coding=utf-8\n \n import bisect\n-from faker.generator import mod_random\n+from faker.generator import random as mod_random\n+\n \n def random_sample(random=None):\n if random is None:\n", "issue": "Reseed code broke in 0.8 unnecessarily\nCommit ea4f189bbff1925d7a7e2d7cbc6e91e2e8a9a3f3 changed the name of the `random.Random()` object in `faker.generator` from `random` to `mod_random`, re-exposing the `random` module as the name `faker.generator.random`. This broke code like:\r\n\r\n```py\r\nfrom faker.generator import random\r\nrandom.seed(1)\r\n``` \r\n\r\nwhich is basically what my `pytest-randomly` plugin does. At first I thought this was #586 but then I saw that every run was broken. 
The failure is silent because `random` is still a name in `faker.generator`, it just now points at the global `random` module \ud83d\ude31\r\n\r\nI suggest just doing some renaming in the module to fix this so old code continues to work.\n", "before_files": [{"content": "# coding=utf-8\n\nimport bisect\nfrom faker.generator import mod_random\n\ndef random_sample(random=None):\n if random is None:\n random = mod_random\n return random.uniform(0.0, 1.0)\n\n\ndef cumsum(it):\n total = 0\n for x in it:\n total += x\n yield total\n\n\ndef choice_distribution(a, p, random=None):\n if random is None:\n random = mod_random\n\n assert len(a) == len(p)\n\n if hasattr(random, 'choices'):\n return random.choices(a, weights=p)[0]\n else:\n cdf = list(cumsum(p))\n normal = cdf[-1]\n cdf2 = [float(i) / float(normal) for i in cdf]\n uniform_sample = random_sample(random=random)\n idx = bisect.bisect_right(cdf2, uniform_sample)\n return a[idx]\n", "path": "faker/utils/distribution.py"}, {"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals\n\nimport re\nimport random\n\n\n_re_token = re.compile(r'\\{\\{(\\s?)(\\w+)(\\s?)\\}\\}')\nmod_random = random.Random()\n\n\nclass Generator(object):\n\n __config = {}\n\n def __init__(self, **config):\n self.providers = []\n self.__config = dict(\n list(self.__config.items()) + list(config.items()))\n self.__random = mod_random\n\n def add_provider(self, provider):\n\n if type(provider) is type:\n provider = provider(self)\n\n self.providers.insert(0, provider)\n\n for method_name in dir(provider):\n # skip 'private' method\n if method_name.startswith('_'):\n continue\n\n faker_function = getattr(provider, method_name)\n\n if hasattr(faker_function, '__call__') or \\\n isinstance(faker_function, (classmethod, staticmethod)):\n # add all faker method to generator\n self.set_formatter(method_name, faker_function)\n\n def provider(self, name):\n try:\n lst = [p for p in self.get_providers()\n if p.__provider__ == name.lower()]\n return lst[0]\n except IndexError:\n return None\n\n def get_providers(self):\n \"\"\"Returns added providers.\"\"\"\n return self.providers\n\n @property\n def random(self):\n return self.__random\n\n def seed_instance(self, seed=None):\n \"\"\"Calls random.seed\"\"\"\n if self.__random == mod_random:\n # create per-instance random obj when first time seed_instance() is called\n self.__random = random.Random()\n self.__random.seed(seed)\n\n @classmethod\n def seed(cls, seed=None):\n mod_random.seed(seed)\n\n def format(self, formatter, *args, **kwargs):\n \"\"\"\n This is a secure way to make a fake from another Provider.\n \"\"\"\n # TODO: data export?\n return self.get_formatter(formatter)(*args, **kwargs)\n\n def get_formatter(self, formatter):\n try:\n return getattr(self, formatter)\n except AttributeError:\n if 'locale' in self.__config:\n msg = 'Unknown formatter \"{0}\" with locale \"{1}\"'.format(\n formatter, self.__config['locale']\n )\n else:\n raise AttributeError('Unknown formatter \"{0}\"'.format(\n formatter\n ))\n raise AttributeError(msg)\n\n def set_formatter(self, name, method):\n \"\"\"\n This method adds a provider method to generator.\n Override this method to add some decoration or logging stuff.\n \"\"\"\n setattr(self, name, method)\n\n def parse(self, text):\n \"\"\"\n Replaces tokens (like '{{ tokenName }}' or '{{tokenName}}')\n with the result from the token method call.\n \"\"\"\n return _re_token.sub(self.__format_token, text)\n\n def __format_token(self, matches):\n formatter = 
list(matches.groups())\n formatter[1] = self.format(formatter[1])\n return ''.join(formatter)\n", "path": "faker/generator.py"}]} | 1,872 | 407 |
gh_patches_debug_4387 | rasdani/github-patches | git_diff | falconry__falcon-1593 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Official support for CPython 3.8
CPython 3.8.0 was [released Oct 14, 2019](https://www.python.org/downloads/release/python-380/).
Make Falcon officially support CPython 3.8:
- [ ] README
- [ ] Docker images
- [ ] Docs
- [ ] Tox
- [ ] Travis
- [ ] setup.py
</issue>
<code>
[start of setup.py]
1 import glob
2 import imp
3 import io
4 import os
5 from os import path
6 import re
7 import sys
8
9 from setuptools import Extension, find_packages, setup
10
11 MYDIR = path.abspath(os.path.dirname(__file__))
12
13 VERSION = imp.load_source('version', path.join('.', 'falcon', 'version.py'))
14 VERSION = VERSION.__version__
15
16 REQUIRES = []
17
18 try:
19 sys.pypy_version_info
20 PYPY = True
21 except AttributeError:
22 PYPY = False
23
24 if PYPY:
25 CYTHON = False
26 else:
27 try:
28 from Cython.Distutils import build_ext
29 CYTHON = True
30 except ImportError:
31 # TODO(kgriffs): pip now ignores all output, so the user
32 # may not see this message. See also:
33 #
34 # https://github.com/pypa/pip/issues/2732
35 #
36 print('\nNOTE: Cython not installed. '
37 'Falcon will still work fine, but may run '
38 'a bit slower.\n')
39 CYTHON = False
40
41 if CYTHON:
42 def list_modules(dirname):
43 filenames = glob.glob(path.join(dirname, '*.py'))
44
45 module_names = []
46 for name in filenames:
47 module, ext = path.splitext(path.basename(name))
48 if module != '__init__':
49 module_names.append(module)
50
51 return module_names
52
53 package_names = [
54 'falcon',
55 'falcon.media',
56 'falcon.routing',
57 'falcon.util',
58 'falcon.vendor.mimeparse',
59 ]
60 ext_modules = [
61 Extension(
62 package + '.' + module,
63 [path.join(*(package.split('.') + [module + '.py']))]
64 )
65 for package in package_names
66 for module in list_modules(path.join(MYDIR, *package.split('.')))
67 ]
68
69 cmdclass = {'build_ext': build_ext}
70
71 else:
72 cmdclass = {}
73 ext_modules = []
74
75
76 def load_description():
77 in_patron_list = False
78 in_patron_replacement = False
79 in_raw = False
80
81 description_lines = []
82
83 # NOTE(kgriffs): PyPI does not support the raw directive
84 for readme_line in io.open('README.rst', 'r', encoding='utf-8'):
85
86 # NOTE(vytas): The patron list largely builds upon raw sections
87 if readme_line.startswith('.. Patron list starts'):
88 in_patron_list = True
89 in_patron_replacement = True
90 continue
91 elif in_patron_list:
92 if not readme_line.strip():
93 in_patron_replacement = False
94 elif in_patron_replacement:
95 description_lines.append(readme_line.lstrip())
96 if readme_line.startswith('.. Patron list ends'):
97 in_patron_list = False
98 continue
99 elif readme_line.startswith('.. raw::'):
100 in_raw = True
101 elif in_raw:
102 if readme_line and not re.match(r'\s', readme_line):
103 in_raw = False
104
105 if not in_raw:
106 description_lines.append(readme_line)
107
108 return ''.join(description_lines)
109
110
111 setup(
112 name='falcon',
113 version=VERSION,
114 description='An unladen web framework for building APIs and app backends.',
115 long_description=load_description(),
116 long_description_content_type='text/x-rst',
117 classifiers=[
118 'Development Status :: 5 - Production/Stable',
119 'Environment :: Web Environment',
120 'Natural Language :: English',
121 'Intended Audience :: Developers',
122 'Intended Audience :: System Administrators',
123 'License :: OSI Approved :: Apache Software License',
124 'Operating System :: MacOS :: MacOS X',
125 'Operating System :: Microsoft :: Windows',
126 'Operating System :: POSIX',
127 'Topic :: Internet :: WWW/HTTP :: WSGI',
128 'Topic :: Software Development :: Libraries :: Application Frameworks',
129 'Programming Language :: Python',
130 'Programming Language :: Python :: Implementation :: CPython',
131 'Programming Language :: Python :: Implementation :: PyPy',
132 'Programming Language :: Python :: 3',
133 'Programming Language :: Python :: 3.5',
134 'Programming Language :: Python :: 3.6',
135 'Programming Language :: Python :: 3.7',
136 ],
137 keywords='wsgi web api framework rest http cloud',
138 author='Kurt Griffiths',
139 author_email='[email protected]',
140 url='https://falconframework.org',
141 license='Apache 2.0',
142 packages=find_packages(exclude=['tests']),
143 include_package_data=True,
144 zip_safe=False,
145 python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
146 install_requires=REQUIRES,
147 cmdclass=cmdclass,
148 ext_modules=ext_modules,
149 tests_require=['testtools', 'requests', 'pyyaml', 'pytest', 'pytest-runner'],
150 entry_points={
151 'console_scripts': [
152 'falcon-bench = falcon.cmd.bench:main',
153 'falcon-print-routes = falcon.cmd.print_routes:main'
154 ]
155 }
156 )
157
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -133,6 +133,7 @@
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
+ 'Programming Language :: Python :: 3.8',
],
keywords='wsgi web api framework rest http cloud',
author='Kurt Griffiths',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -133,6 +133,7 @@\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n+ 'Programming Language :: Python :: 3.8',\n ],\n keywords='wsgi web api framework rest http cloud',\n author='Kurt Griffiths',\n", "issue": "Official support for CPython 3.8\nCPython 3.8.0 was [released Oct 14, 2019](https://www.python.org/downloads/release/python-380/).\r\n\r\nMake Falcon offically support CPython 3.8:\r\n- [ ] README\r\n- [ ] Docker images\r\n- [ ] Docs\r\n- [ ] Tox\r\n- [ ] Travis\r\n- [ ] setup.py\n", "before_files": [{"content": "import glob\nimport imp\nimport io\nimport os\nfrom os import path\nimport re\nimport sys\n\nfrom setuptools import Extension, find_packages, setup\n\nMYDIR = path.abspath(os.path.dirname(__file__))\n\nVERSION = imp.load_source('version', path.join('.', 'falcon', 'version.py'))\nVERSION = VERSION.__version__\n\nREQUIRES = []\n\ntry:\n sys.pypy_version_info\n PYPY = True\nexcept AttributeError:\n PYPY = False\n\nif PYPY:\n CYTHON = False\nelse:\n try:\n from Cython.Distutils import build_ext\n CYTHON = True\n except ImportError:\n # TODO(kgriffs): pip now ignores all output, so the user\n # may not see this message. See also:\n #\n # https://github.com/pypa/pip/issues/2732\n #\n print('\\nNOTE: Cython not installed. '\n 'Falcon will still work fine, but may run '\n 'a bit slower.\\n')\n CYTHON = False\n\nif CYTHON:\n def list_modules(dirname):\n filenames = glob.glob(path.join(dirname, '*.py'))\n\n module_names = []\n for name in filenames:\n module, ext = path.splitext(path.basename(name))\n if module != '__init__':\n module_names.append(module)\n\n return module_names\n\n package_names = [\n 'falcon',\n 'falcon.media',\n 'falcon.routing',\n 'falcon.util',\n 'falcon.vendor.mimeparse',\n ]\n ext_modules = [\n Extension(\n package + '.' + module,\n [path.join(*(package.split('.') + [module + '.py']))]\n )\n for package in package_names\n for module in list_modules(path.join(MYDIR, *package.split('.')))\n ]\n\n cmdclass = {'build_ext': build_ext}\n\nelse:\n cmdclass = {}\n ext_modules = []\n\n\ndef load_description():\n in_patron_list = False\n in_patron_replacement = False\n in_raw = False\n\n description_lines = []\n\n # NOTE(kgriffs): PyPI does not support the raw directive\n for readme_line in io.open('README.rst', 'r', encoding='utf-8'):\n\n # NOTE(vytas): The patron list largely builds upon raw sections\n if readme_line.startswith('.. Patron list starts'):\n in_patron_list = True\n in_patron_replacement = True\n continue\n elif in_patron_list:\n if not readme_line.strip():\n in_patron_replacement = False\n elif in_patron_replacement:\n description_lines.append(readme_line.lstrip())\n if readme_line.startswith('.. Patron list ends'):\n in_patron_list = False\n continue\n elif readme_line.startswith('.. 
raw::'):\n in_raw = True\n elif in_raw:\n if readme_line and not re.match(r'\\s', readme_line):\n in_raw = False\n\n if not in_raw:\n description_lines.append(readme_line)\n\n return ''.join(description_lines)\n\n\nsetup(\n name='falcon',\n version=VERSION,\n description='An unladen web framework for building APIs and app backends.',\n long_description=load_description(),\n long_description_content_type='text/x-rst',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Natural Language :: English',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Topic :: Internet :: WWW/HTTP :: WSGI',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n ],\n keywords='wsgi web api framework rest http cloud',\n author='Kurt Griffiths',\n author_email='[email protected]',\n url='https://falconframework.org',\n license='Apache 2.0',\n packages=find_packages(exclude=['tests']),\n include_package_data=True,\n zip_safe=False,\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',\n install_requires=REQUIRES,\n cmdclass=cmdclass,\n ext_modules=ext_modules,\n tests_require=['testtools', 'requests', 'pyyaml', 'pytest', 'pytest-runner'],\n entry_points={\n 'console_scripts': [\n 'falcon-bench = falcon.cmd.bench:main',\n 'falcon-print-routes = falcon.cmd.print_routes:main'\n ]\n }\n)\n", "path": "setup.py"}]} | 2,085 | 105 |
gh_patches_debug_19281 | rasdani/github-patches | git_diff | pyinstaller__pyinstaller-7251 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`TypeError: unsupported operand type(s) for |: 'KeyboardModifier' and 'Key'` with PySide 6.4.0.1 application
<!--
Welcome to the PyInstaller issue tracker! Before creating an issue, please heed the following:
1. This tracker should only be used to report bugs and request features / enhancements to PyInstaller
- For questions and general support, use the discussions forum.
2. Use the search function before creating a new issue. Duplicates will be closed and directed to
the original discussion.
3. When making a bug report, make sure you provide all required information. The easier it is for
maintainers to reproduce, the faster it'll be fixed.
-->
<!-- +++ ONLY TEXT +++ DO NOT POST IMAGES +++ -->
## Description of the issue
When running a frozen PySide6 application using PySide 6.4.0.1, the `|` operator between Qt.KeyboardModifier and Qt.Key stops working, for example:
```
Traceback (most recent call last):
File "script.py", line 28, in <module>
window = MainWindow()
File "script.py", line 18, in __init__
button_action.setShortcut(QKeySequence(Qt.AltModifier | Qt.Key_D))
TypeError: unsupported operand type(s) for |: 'KeyboardModifier' and 'Key'
[10109] Failed to execute script 'script' due to unhandled exception!
```
Note that the script used for this works just fine if invoked directly from a development environment, and it works both in a development and a frozen build if downgraded to 6.3.2.
Before filing this I made a repo to demonstrate the bug, but most of the info is included in this issue template already. https://github.com/twizmwazin/pyside-pyinstaller-unsupported-operand-bug
### Context information (for bug reports)
* Output of `pyinstaller --version`: ```5.6.2```
* Version of Python: Python 3.10.8
* Platform: Ubuntu 20.04 on WSL 2
* How you installed Python: Python 3.10.8 from source
* Did you also try this on another platform? Does it work there?
First discovered after upgrading to PySide 6.4.0.1 in Azure DevOps CI using Ubuntu 20.04 and MS's build of Python 3.10. Also reproduced on Windows 11 using Python 3.10.8 64-bit installed using the python.org installer.
* try the latest development version, using the following command:
```shell
pip install https://github.com/pyinstaller/pyinstaller/archive/develop.zip
```
* follow *all* the instructions in our "If Things Go Wrong" Guide
(https://github.com/pyinstaller/pyinstaller/wiki/If-Things-Go-Wrong) and
### Make sure [everything is packaged correctly](https://github.com/pyinstaller/pyinstaller/wiki/How-to-Report-Bugs#make-sure-everything-is-packaged-correctly)
* [x] start with clean installation
* [x] use the latest development version
* [x] Run your frozen program **from a command window (shell)** — instead of double-clicking on it
* [x] Package your program in **--onedir mode**
* [x] Package **without UPX**, say: use the option `--noupx` or set `upx=False` in your .spec-file
* [x] Repackage you application in **verbose/debug mode**. For this, pass the option `--debug` to `pyi-makespec` or `pyinstaller` or use `EXE(..., debug=1, ...)` in your .spec file.
### A minimal example program which shows the error
```
#!/usr/bin/env python3
from PySide6.QtCore import Qt
from PySide6.QtGui import QAction, QKeySequence
from PySide6.QtWidgets import QApplication, QToolBar, QMainWindow
class MainWindow(QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
self.setWindowTitle("My Awesome App")
toolbar = QToolBar("My main toolbar")
self.addToolBar(toolbar)
button_action = QAction("Your button", self)
button_action.setShortcut(QKeySequence(Qt.AltModifier | Qt.Key_D))
button_action.triggered.connect(self.onMyToolBarButtonClick)
toolbar.addAction(button_action)
def onMyToolBarButtonClick(self, s):
print("click", s)
if __name__ == "__main__":
app = QApplication()
window = MainWindow()
window.show()
app.exec()
```
### Stacktrace / full error message
```
Traceback (most recent call last):
File "script.py", line 31, in <module>
window = MainWindow()
File "script.py", line 21, in __init__
button_action.setShortcut(QKeySequence(Qt.AltModifier | Qt.Key_D))
TypeError: unsupported operand type(s) for |: 'KeyboardModifier' and 'Key'
```
Please also see <https://github.com/pyinstaller/pyinstaller/wiki/How-to-Report-Bugs>
for more about what would be useful to solve the issue.
</issue>
<code>
[start of PyInstaller/hooks/hook-PySide6.py]
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2021-2022, PyInstaller Development Team.
3 #
4 # Distributed under the terms of the GNU General Public License (version 2
5 # or later) with exception for distributing the bootloader.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #
9 # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
10 #-----------------------------------------------------------------------------
11
12 from PyInstaller.utils.hooks.qt import get_qt_binaries, pyside6_library_info
13
14 # Only proceed if PySide6 can be imported.
15 if pyside6_library_info.version is not None:
16 hiddenimports = ['shiboken6', 'inspect']
17
18 # Collect required Qt binaries.
19 binaries = get_qt_binaries(pyside6_library_info)
20
[end of PyInstaller/hooks/hook-PySide6.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/PyInstaller/hooks/hook-PySide6.py b/PyInstaller/hooks/hook-PySide6.py
--- a/PyInstaller/hooks/hook-PySide6.py
+++ b/PyInstaller/hooks/hook-PySide6.py
@@ -9,11 +9,17 @@
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
+from PyInstaller.utils.hooks import is_module_satisfies
from PyInstaller.utils.hooks.qt import get_qt_binaries, pyside6_library_info
# Only proceed if PySide6 can be imported.
if pyside6_library_info.version is not None:
hiddenimports = ['shiboken6', 'inspect']
+ # Starting with PySide6 6.4.0, we need to collect PySide6.support.deprecated for | and & operators to work with
+ # Qt key and key modifiers enums. See #7249.
+ if is_module_satisfies("PySide6 >= 6.4.0"):
+ hiddenimports += ['PySide6.support.deprecated']
+
# Collect required Qt binaries.
binaries = get_qt_binaries(pyside6_library_info)
| {"golden_diff": "diff --git a/PyInstaller/hooks/hook-PySide6.py b/PyInstaller/hooks/hook-PySide6.py\n--- a/PyInstaller/hooks/hook-PySide6.py\n+++ b/PyInstaller/hooks/hook-PySide6.py\n@@ -9,11 +9,17 @@\n # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n #-----------------------------------------------------------------------------\n \n+from PyInstaller.utils.hooks import is_module_satisfies\n from PyInstaller.utils.hooks.qt import get_qt_binaries, pyside6_library_info\n \n # Only proceed if PySide6 can be imported.\n if pyside6_library_info.version is not None:\n hiddenimports = ['shiboken6', 'inspect']\n \n+ # Starting with PySide6 6.4.0, we need to collect PySide6.support.deprecated for | and & operators to work with\n+ # Qt key and key modifiers enums. See #7249.\n+ if is_module_satisfies(\"PySide6 >= 6.4.0\"):\n+ hiddenimports += ['PySide6.support.deprecated']\n+\n # Collect required Qt binaries.\n binaries = get_qt_binaries(pyside6_library_info)\n", "issue": "`TypeError: unsupported operand type(s) for |: 'KeyboardModifier' and 'Key'` with PySide 6.4.0.1 application\n<!--\r\nWelcome to the PyInstaller issue tracker! Before creating an issue, please heed the following:\r\n\r\n1. This tracker should only be used to report bugs and request features / enhancements to PyInstaller\r\n - For questions and general support, use the discussions forum.\r\n2. Use the search function before creating a new issue. Duplicates will be closed and directed to\r\n the original discussion.\r\n3. When making a bug report, make sure you provide all required information. The easier it is for\r\n maintainers to reproduce, the faster it'll be fixed.\r\n-->\r\n\r\n<!-- +++ ONLY TEXT +++ DO NOT POST IMAGES +++ -->\r\n\r\n## Description of the issue\r\n\r\nWhen running a frozen PySide6 application using PySide 6.4.0.1, the `|` operator between Qt.KeyboardModifier and Qt.Key stops working, for example:\r\n```\r\nTraceback (most recent call last):\r\n File \"script.py\", line 28, in <module>\r\n window = MainWindow()\r\n File \"script.py\", line 18, in __init__\r\n button_action.setShortcut(QKeySequence(Qt.AltModifier | Qt.Key_D))\r\nTypeError: unsupported operand type(s) for |: 'KeyboardModifier' and 'Key'\r\n[10109] Failed to execute script 'script' due to unhandled exception!\r\n```\r\n\r\nNote that the script used for this works just fine if invoked directly from a development environment, and it works both in a development and a frozen build if downgraded to 6.3.2.\r\n\r\nBefore filing this I made a repo to demonstrate the bug, but most of the info is included in this issue template already. https://github.com/twizmwazin/pyside-pyinstaller-unsupported-operand-bug\r\n\r\n### Context information (for bug reports)\r\n\r\n* Output of `pyinstaller --version`: ```5.6.2```\r\n* Version of Python: Python 3.10.8\r\n* Platform: Ubuntu 20.04 on WSL 2\r\n* How you installed Python: Python 3.10.8 from source\r\n* Did you also try this on another platform? Does it work there?\r\nFirst discovered after upgrading to PySide 6.4.0.1 in Azure DevOps CI using Ubuntu 20.04 and MS's build of Python 3.10. 
Also reproduced on Windows 11 using Python 3.10.8 64-bit installed using the python.org installer.\r\n\r\n\r\n* try the latest development version, using the following command:\r\n\r\n```shell\r\npip install https://github.com/pyinstaller/pyinstaller/archive/develop.zip\r\n```\r\n\r\n* follow *all* the instructions in our \"If Things Go Wrong\" Guide\r\n (https://github.com/pyinstaller/pyinstaller/wiki/If-Things-Go-Wrong) and\r\n\r\n### Make sure [everything is packaged correctly](https://github.com/pyinstaller/pyinstaller/wiki/How-to-Report-Bugs#make-sure-everything-is-packaged-correctly)\r\n\r\n * [x] start with clean installation\r\n * [x] use the latest development version\r\n * [x] Run your frozen program **from a command window (shell)** \u2014 instead of double-clicking on it\r\n * [x] Package your program in **--onedir mode**\r\n * [x] Package **without UPX**, say: use the option `--noupx` or set `upx=False` in your .spec-file\r\n * [x] Repackage you application in **verbose/debug mode**. For this, pass the option `--debug` to `pyi-makespec` or `pyinstaller` or use `EXE(..., debug=1, ...)` in your .spec file.\r\n\r\n\r\n### A minimal example program which shows the error\r\n\r\n```\r\n#!/usr/bin/env python3\r\n\r\nfrom PySide6.QtCore import Qt\r\nfrom PySide6.QtGui import QAction, QKeySequence\r\nfrom PySide6.QtWidgets import QApplication, QToolBar, QMainWindow\r\n\r\n\r\nclass MainWindow(QMainWindow):\r\n def __init__(self):\r\n super(MainWindow, self).__init__()\r\n\r\n self.setWindowTitle(\"My Awesome App\")\r\n\r\n toolbar = QToolBar(\"My main toolbar\")\r\n self.addToolBar(toolbar)\r\n\r\n button_action = QAction(\"Your button\", self)\r\n button_action.setShortcut(QKeySequence(Qt.AltModifier | Qt.Key_D))\r\n button_action.triggered.connect(self.onMyToolBarButtonClick)\r\n toolbar.addAction(button_action)\r\n\r\n def onMyToolBarButtonClick(self, s):\r\n print(\"click\", s)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app = QApplication()\r\n window = MainWindow()\r\n window.show()\r\n app.exec()\r\n```\r\n\r\n### Stacktrace / full error message\r\n\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"script.py\", line 31, in <module>\r\n window = MainWindow()\r\n File \"script.py\", line 21, in __init__\r\n button_action.setShortcut(QKeySequence(Qt.AltModifier | Qt.Key_D))\r\nTypeError: unsupported operand type(s) for |: 'KeyboardModifier' and 'Key'\r\n```\r\n\r\nPlease also see <https://github.com/pyinstaller/pyinstaller/wiki/How-to-Report-Bugs>\r\nfor more about what would use to solve the issue.\r\n\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2021-2022, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\nfrom PyInstaller.utils.hooks.qt import get_qt_binaries, pyside6_library_info\n\n# Only proceed if PySide6 can be imported.\nif pyside6_library_info.version is not None:\n hiddenimports = ['shiboken6', 'inspect']\n\n # Collect required Qt binaries.\n binaries = get_qt_binaries(pyside6_library_info)\n", "path": "PyInstaller/hooks/hook-PySide6.py"}]} | 1,881 | 269 |
gh_patches_debug_26508 | rasdani/github-patches | git_diff | gammapy__gammapy-1567 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
What's the correct value for errordef in iminuit?
see https://github.com/gammapy/gammapy/blob/master/gammapy/utils/fitting/iminuit.py#L88
Reminder:
our fit statistic is -2 log (likelihood)
https://github.com/gammapy/gammapy/blob/master/gammapy/stats/fit_statistics.py#L58
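For reference, the usual MINUIT convention is errordef=0.5 when the cost function is -log(likelihood) and errordef=1 for chi-square-like costs, which includes a statistic defined as -2 log(likelihood). A minimal, self-contained sketch with made-up names (not gammapy code):
```python
import math

def neg_log_likelihood(mu, data):
    # Poisson log-likelihood terms with constants dropped: sum(mu - n*log(mu))
    return sum(mu - n * math.log(mu) for n in data)

def fit_stat(mu, data):
    # A -2 * log(likelihood) style statistic, like the fit statistic mentioned above
    return 2.0 * neg_log_likelihood(mu, data)

# Pairing with MINUIT's error definition:
#   minimizing neg_log_likelihood -> errordef = 0.5
#   minimizing fit_stat           -> errordef = 1
print(fit_stat(3.0, [2, 4, 3]))
```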
</issue>
<code>
[start of gammapy/utils/fitting/iminuit.py]
1 # Licensed under a 3-clause BSD style license - see LICENSE.rst
2 """iminuit fitting functions.
3 """
4 from __future__ import absolute_import, division, print_function, unicode_literals
5 import numpy as np
6
7 __all__ = [
8 'fit_iminuit',
9 ]
10
11
12 def fit_iminuit(parameters, function, opts_minuit=None):
13 """iminuit optimization
14
15 Parameters
16 ----------
17 parameters : `~gammapy.utils.modeling.ParameterList`
18 Parameters with starting values
19 function : callable
20 Likelihood function
21 opts_minuit : dict (optional)
22 Options passed to `iminuit.Minuit` constructor
23
24 Returns
25 -------
26 parameters : `~gammapy.utils.modeling.ParameterList`
27 Parameters with best-fit values
28 minuit : `~iminuit.Minuit`
29 Minuit object
30 """
31 from iminuit import Minuit
32
33 minuit_func = MinuitFunction(function, parameters)
34
35 if opts_minuit is None:
36 opts_minuit = {}
37 opts_minuit.update(make_minuit_par_kwargs(parameters))
38
39 minuit = Minuit(minuit_func.fcn,
40 forced_parameters=parameters.names,
41 **opts_minuit)
42
43 minuit.migrad()
44 parameters.covariance = _get_covar(minuit)
45
46 return parameters, minuit
47
48
49 class MinuitFunction(object):
50 """Wrapper for iminuit
51
52 Parameters
53 ----------
54 parameters : `~gammapy.utils.modeling.ParameterList`
55 Parameters with starting values
56 function : callable
57 Likelihood function
58 """
59
60 def __init__(self, function, parameters):
61 self.function = function
62 self.parameters = parameters
63
64 def fcn(self, *values):
65 for value, parameter in zip(values, self.parameters.parameters):
66 parameter.value = value
67 return self.function(self.parameters)
68
69
70 def make_minuit_par_kwargs(parameters):
71 """Create *Parameter Keyword Arguments* for the `Minuit` constructor.
72
73 See: http://iminuit.readthedocs.io/en/latest/api.html#iminuit.Minuit
74 """
75 kwargs = {}
76 for par in parameters.parameters:
77 kwargs[par.name] = par.value
78 if par.frozen:
79 kwargs['fix_{}'.format(par.name)] = True
80 min_ = None if np.isnan(par.min) else par.min
81 max_ = None if np.isnan(par.max) else par.max
82 kwargs['limit_{}'.format(par.name)] = (min_, max_)
83
84 if parameters.covariance is None:
85 kwargs['error_{}'.format(par.name)] = 1
86 else:
87 kwargs['error_{}'.format(par.name)] = parameters.error(par.name)
88
89 # TODO: Check if we need 0.5 or 1
90 kwargs['errordef'] = 1
91
92 return kwargs
93
94
95 def _get_covar(minuit):
96 """Get full covar matrix as Numpy array.
97
98 This was added as `minuit.np_covariance` in `iminuit` in v1.3,
99 but we still want to support v1.2
100 """
101 n = len(minuit.parameters)
102 m = np.zeros((n, n))
103 for i1, k1 in enumerate(minuit.parameters):
104 for i2, k2 in enumerate(minuit.parameters):
105 if set([k1, k2]).issubset(minuit.list_of_vary_param()):
106 m[i1, i2] = minuit.covariance[(k1, k2)]
107 return m
108
[end of gammapy/utils/fitting/iminuit.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gammapy/utils/fitting/iminuit.py b/gammapy/utils/fitting/iminuit.py
--- a/gammapy/utils/fitting/iminuit.py
+++ b/gammapy/utils/fitting/iminuit.py
@@ -36,6 +36,10 @@
opts_minuit = {}
opts_minuit.update(make_minuit_par_kwargs(parameters))
+ # In Gammapy, we have the factor 2 in the likelihood function
+ # This means `errordef=1` in the Minuit interface is correct
+ opts_minuit.setdefault('errordef', 1)
+
minuit = Minuit(minuit_func.fcn,
forced_parameters=parameters.names,
**opts_minuit)
@@ -75,8 +79,7 @@
kwargs = {}
for par in parameters.parameters:
kwargs[par.name] = par.value
- if par.frozen:
- kwargs['fix_{}'.format(par.name)] = True
+
min_ = None if np.isnan(par.min) else par.min
max_ = None if np.isnan(par.max) else par.max
kwargs['limit_{}'.format(par.name)] = (min_, max_)
@@ -86,8 +89,8 @@
else:
kwargs['error_{}'.format(par.name)] = parameters.error(par.name)
- # TODO: Check if we need 0.5 or 1
- kwargs['errordef'] = 1
+ if par.frozen:
+ kwargs['fix_{}'.format(par.name)] = True
return kwargs
| {"golden_diff": "diff --git a/gammapy/utils/fitting/iminuit.py b/gammapy/utils/fitting/iminuit.py\n--- a/gammapy/utils/fitting/iminuit.py\n+++ b/gammapy/utils/fitting/iminuit.py\n@@ -36,6 +36,10 @@\n opts_minuit = {}\n opts_minuit.update(make_minuit_par_kwargs(parameters))\n \n+ # In Gammapy, we have the factor 2 in the likelihood function\n+ # This means `errordef=1` in the Minuit interface is correct\n+ opts_minuit.setdefault('errordef', 1)\n+\n minuit = Minuit(minuit_func.fcn,\n forced_parameters=parameters.names,\n **opts_minuit)\n@@ -75,8 +79,7 @@\n kwargs = {}\n for par in parameters.parameters:\n kwargs[par.name] = par.value\n- if par.frozen:\n- kwargs['fix_{}'.format(par.name)] = True\n+\n min_ = None if np.isnan(par.min) else par.min\n max_ = None if np.isnan(par.max) else par.max\n kwargs['limit_{}'.format(par.name)] = (min_, max_)\n@@ -86,8 +89,8 @@\n else:\n kwargs['error_{}'.format(par.name)] = parameters.error(par.name)\n \n- # TODO: Check if we need 0.5 or 1\n- kwargs['errordef'] = 1\n+ if par.frozen:\n+ kwargs['fix_{}'.format(par.name)] = True\n \n return kwargs\n", "issue": "What's the correct value for errordef in iminuit?\nsee https://github.com/gammapy/gammapy/blob/master/gammapy/utils/fitting/iminuit.py#L88\r\n\r\nReminder:\r\nout fit statistic is -2 log (likelihood)\r\nhttps://github.com/gammapy/gammapy/blob/master/gammapy/stats/fit_statistics.py#L58\n", "before_files": [{"content": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"iminuit fitting functions.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport numpy as np\n\n__all__ = [\n 'fit_iminuit',\n]\n\n\ndef fit_iminuit(parameters, function, opts_minuit=None):\n \"\"\"iminuit optimization\n\n Parameters\n ----------\n parameters : `~gammapy.utils.modeling.ParameterList`\n Parameters with starting values\n function : callable\n Likelihood function\n opts_minuit : dict (optional)\n Options passed to `iminuit.Minuit` constructor\n\n Returns\n -------\n parameters : `~gammapy.utils.modeling.ParameterList`\n Parameters with best-fit values\n minuit : `~iminuit.Minuit`\n Minuit object\n \"\"\"\n from iminuit import Minuit\n\n minuit_func = MinuitFunction(function, parameters)\n\n if opts_minuit is None:\n opts_minuit = {}\n opts_minuit.update(make_minuit_par_kwargs(parameters))\n\n minuit = Minuit(minuit_func.fcn,\n forced_parameters=parameters.names,\n **opts_minuit)\n\n minuit.migrad()\n parameters.covariance = _get_covar(minuit)\n\n return parameters, minuit\n\n\nclass MinuitFunction(object):\n \"\"\"Wrapper for iminuit\n\n Parameters\n ----------\n parameters : `~gammapy.utils.modeling.ParameterList`\n Parameters with starting values\n function : callable\n Likelihood function\n \"\"\"\n\n def __init__(self, function, parameters):\n self.function = function\n self.parameters = parameters\n\n def fcn(self, *values):\n for value, parameter in zip(values, self.parameters.parameters):\n parameter.value = value\n return self.function(self.parameters)\n\n\ndef make_minuit_par_kwargs(parameters):\n \"\"\"Create *Parameter Keyword Arguments* for the `Minuit` constructor.\n\n See: http://iminuit.readthedocs.io/en/latest/api.html#iminuit.Minuit\n \"\"\"\n kwargs = {}\n for par in parameters.parameters:\n kwargs[par.name] = par.value\n if par.frozen:\n kwargs['fix_{}'.format(par.name)] = True\n min_ = None if np.isnan(par.min) else par.min\n max_ = None if np.isnan(par.max) else par.max\n kwargs['limit_{}'.format(par.name)] = (min_, max_)\n\n if 
parameters.covariance is None:\n kwargs['error_{}'.format(par.name)] = 1\n else:\n kwargs['error_{}'.format(par.name)] = parameters.error(par.name)\n\n # TODO: Check if we need 0.5 or 1\n kwargs['errordef'] = 1\n\n return kwargs\n\n\ndef _get_covar(minuit):\n \"\"\"Get full covar matrix as Numpy array.\n\n This was added as `minuit.np_covariance` in `iminuit` in v1.3,\n but we still want to support v1.2\n \"\"\"\n n = len(minuit.parameters)\n m = np.zeros((n, n))\n for i1, k1 in enumerate(minuit.parameters):\n for i2, k2 in enumerate(minuit.parameters):\n if set([k1, k2]).issubset(minuit.list_of_vary_param()):\n m[i1, i2] = minuit.covariance[(k1, k2)]\n return m\n", "path": "gammapy/utils/fitting/iminuit.py"}]} | 1,611 | 355 |
gh_patches_debug_10886 | rasdani/github-patches | git_diff | certbot__certbot-4243 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
--cert-name option not honored on new SSL certificate issuance
Hello,
```shell
root@stretch:~# certbot-auto --version
certbot 0.11.1
```
```shell
# certbot-auto \
certonly \
--non-interactive \
--agree-tos --email <email> \
--staging \
--webroot --webroot-path <path> \
--allow-subset-of-names \
--domain unresolvable.domain.tld \
--domain resolvable.domain.tld \
--cert-name unresolvable.domain.tld
```
Results in:
```shell
No certificate found with name unresolvable.domain.tld (expected /etc/letsencrypt/renewal/unresolvable.domain.tld.conf).
```
But the help says (when creating a new certificate ...):
```
--cert-name CERTNAME Certificate name to apply. Only one certificate name
can be used per Certbot run. To see certificate names,
run 'certbot certificates'. When creating a new
certificate, specifies the new certificate's name.
(default: None)
```
Thank you.
</issue>
<code>
[start of certbot/cert_manager.py]
1 """Tools for managing certificates."""
2 import datetime
3 import logging
4 import os
5 import pytz
6 import traceback
7 import zope.component
8
9 from certbot import errors
10 from certbot import interfaces
11 from certbot import ocsp
12 from certbot import storage
13 from certbot import util
14
15 from certbot.display import util as display_util
16
17 logger = logging.getLogger(__name__)
18
19 ###################
20 # Commands
21 ###################
22
23 def update_live_symlinks(config):
24 """Update the certificate file family symlinks to use archive_dir.
25
26 Use the information in the config file to make symlinks point to
27 the correct archive directory.
28
29 .. note:: This assumes that the installation is using a Reverter object.
30
31 :param config: Configuration.
32 :type config: :class:`certbot.configuration.NamespaceConfig`
33
34 """
35 for renewal_file in storage.renewal_conf_files(config):
36 storage.RenewableCert(renewal_file, config, update_symlinks=True)
37
38 def rename_lineage(config):
39 """Rename the specified lineage to the new name.
40
41 :param config: Configuration.
42 :type config: :class:`certbot.configuration.NamespaceConfig`
43
44 """
45 disp = zope.component.getUtility(interfaces.IDisplay)
46
47 certname = _get_certname(config, "rename")
48
49 new_certname = config.new_certname
50 if not new_certname:
51 code, new_certname = disp.input(
52 "Enter the new name for certificate {0}".format(certname),
53 flag="--updated-cert-name", force_interactive=True)
54 if code != display_util.OK or not new_certname:
55 raise errors.Error("User ended interaction.")
56
57 lineage = lineage_for_certname(config, certname)
58 if not lineage:
59 raise errors.ConfigurationError("No existing certificate with name "
60 "{0} found.".format(certname))
61 storage.rename_renewal_config(certname, new_certname, config)
62 disp.notification("Successfully renamed {0} to {1}."
63 .format(certname, new_certname), pause=False)
64
65 def certificates(config):
66 """Display information about certs configured with Certbot
67
68 :param config: Configuration.
69 :type config: :class:`certbot.configuration.NamespaceConfig`
70 """
71 parsed_certs = []
72 parse_failures = []
73 for renewal_file in storage.renewal_conf_files(config):
74 try:
75 renewal_candidate = storage.RenewableCert(renewal_file, config)
76 parsed_certs.append(renewal_candidate)
77 except Exception as e: # pylint: disable=broad-except
78 logger.warning("Renewal configuration file %s produced an "
79 "unexpected error: %s. Skipping.", renewal_file, e)
80 logger.debug("Traceback was:\n%s", traceback.format_exc())
81 parse_failures.append(renewal_file)
82
83 # Describe all the certs
84 _describe_certs(config, parsed_certs, parse_failures)
85
86 def delete(config):
87 """Delete Certbot files associated with a certificate lineage."""
88 certname = _get_certname(config, "delete")
89 storage.delete_files(config, certname)
90 disp = zope.component.getUtility(interfaces.IDisplay)
91 disp.notification("Deleted all files relating to certificate {0}."
92 .format(certname), pause=False)
93
94 ###################
95 # Public Helpers
96 ###################
97
98 def lineage_for_certname(cli_config, certname):
99 """Find a lineage object with name certname."""
100 configs_dir = cli_config.renewal_configs_dir
101 # Verify the directory is there
102 util.make_or_verify_dir(configs_dir, mode=0o755, uid=os.geteuid())
103 renewal_file = storage.renewal_file_for_certname(cli_config, certname)
104 try:
105 return storage.RenewableCert(renewal_file, cli_config)
106 except (errors.CertStorageError, IOError):
107 logger.debug("Renewal conf file %s is broken.", renewal_file)
108 logger.debug("Traceback was:\n%s", traceback.format_exc())
109 return None
110
111 def domains_for_certname(config, certname):
112 """Find the domains in the cert with name certname."""
113 lineage = lineage_for_certname(config, certname)
114 return lineage.names() if lineage else None
115
116 def find_duplicative_certs(config, domains):
117 """Find existing certs that duplicate the request."""
118 def update_certs_for_domain_matches(candidate_lineage, rv):
119 """Return cert as identical_names_cert if it matches,
120 or subset_names_cert if it matches as subset
121 """
122 # TODO: Handle these differently depending on whether they are
123 # expired or still valid?
124 identical_names_cert, subset_names_cert = rv
125 candidate_names = set(candidate_lineage.names())
126 if candidate_names == set(domains):
127 identical_names_cert = candidate_lineage
128 elif candidate_names.issubset(set(domains)):
129 # This logic finds and returns the largest subset-names cert
130 # in the case where there are several available.
131 if subset_names_cert is None:
132 subset_names_cert = candidate_lineage
133 elif len(candidate_names) > len(subset_names_cert.names()):
134 subset_names_cert = candidate_lineage
135 return (identical_names_cert, subset_names_cert)
136
137 return _search_lineages(config, update_certs_for_domain_matches, (None, None))
138
139
140 ###################
141 # Private Helpers
142 ###################
143
144 def _get_certname(config, verb):
145 """Get certname from flag, interactively, or error out.
146 """
147 certname = config.certname
148 if not certname:
149 disp = zope.component.getUtility(interfaces.IDisplay)
150 filenames = storage.renewal_conf_files(config)
151 choices = [storage.lineagename_for_filename(name) for name in filenames]
152 if not choices:
153 raise errors.Error("No existing certificates found.")
154 code, index = disp.menu("Which certificate would you like to {0}?".format(verb),
155 choices, ok_label="Select", flag="--cert-name",
156 force_interactive=True)
157 if code != display_util.OK or not index in range(0, len(choices)):
158 raise errors.Error("User ended interaction.")
159 certname = choices[index]
160 return certname
161
162 def _report_lines(msgs):
163 """Format a results report for a category of single-line renewal outcomes"""
164 return " " + "\n ".join(str(msg) for msg in msgs)
165
166 def _report_human_readable(config, parsed_certs):
167 """Format a results report for a parsed cert"""
168 certinfo = []
169 checker = ocsp.RevocationChecker()
170 for cert in parsed_certs:
171 if config.certname and cert.lineagename != config.certname:
172 continue
173 if config.domains and not set(config.domains).issubset(cert.names()):
174 continue
175 now = pytz.UTC.fromutc(datetime.datetime.utcnow())
176
177 reasons = []
178 if cert.is_test_cert:
179 reasons.append('TEST_CERT')
180 if cert.target_expiry <= now:
181 reasons.append('EXPIRED')
182 if checker.ocsp_revoked(cert.cert, cert.chain):
183 reasons.append('REVOKED')
184
185 if reasons:
186 status = "INVALID: " + ", ".join(reasons)
187 else:
188 diff = cert.target_expiry - now
189 if diff.days == 1:
190 status = "VALID: 1 day"
191 elif diff.days < 1:
192 status = "VALID: {0} hour(s)".format(diff.seconds // 3600)
193 else:
194 status = "VALID: {0} days".format(diff.days)
195
196 valid_string = "{0} ({1})".format(cert.target_expiry, status)
197 certinfo.append(" Certificate Name: {0}\n"
198 " Domains: {1}\n"
199 " Expiry Date: {2}\n"
200 " Certificate Path: {3}\n"
201 " Private Key Path: {4}".format(
202 cert.lineagename,
203 " ".join(cert.names()),
204 valid_string,
205 cert.fullchain,
206 cert.privkey))
207 return "\n".join(certinfo)
208
209 def _describe_certs(config, parsed_certs, parse_failures):
210 """Print information about the certs we know about"""
211 out = []
212
213 notify = out.append
214
215 if not parsed_certs and not parse_failures:
216 notify("No certs found.")
217 else:
218 if parsed_certs:
219 match = "matching " if config.certname or config.domains else ""
220 notify("Found the following {0}certs:".format(match))
221 notify(_report_human_readable(config, parsed_certs))
222 if parse_failures:
223 notify("\nThe following renewal configuration files "
224 "were invalid:")
225 notify(_report_lines(parse_failures))
226
227 disp = zope.component.getUtility(interfaces.IDisplay)
228 disp.notification("\n".join(out), pause=False, wrap=False)
229
230 def _search_lineages(cli_config, func, initial_rv):
231 """Iterate func over unbroken lineages, allowing custom return conditions.
232
233 Allows flexible customization of return values, including multiple
234 return values and complex checks.
235 """
236 configs_dir = cli_config.renewal_configs_dir
237 # Verify the directory is there
238 util.make_or_verify_dir(configs_dir, mode=0o755, uid=os.geteuid())
239
240 rv = initial_rv
241 for renewal_file in storage.renewal_conf_files(cli_config):
242 try:
243 candidate_lineage = storage.RenewableCert(renewal_file, cli_config)
244 except (errors.CertStorageError, IOError):
245 logger.debug("Renewal conf file %s is broken. Skipping.", renewal_file)
246 logger.debug("Traceback was:\n%s", traceback.format_exc())
247 continue
248 rv = func(candidate_lineage, rv)
249 return rv
250
[end of certbot/cert_manager.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/certbot/cert_manager.py b/certbot/cert_manager.py
--- a/certbot/cert_manager.py
+++ b/certbot/cert_manager.py
@@ -100,7 +100,10 @@
configs_dir = cli_config.renewal_configs_dir
# Verify the directory is there
util.make_or_verify_dir(configs_dir, mode=0o755, uid=os.geteuid())
- renewal_file = storage.renewal_file_for_certname(cli_config, certname)
+ try:
+ renewal_file = storage.renewal_file_for_certname(cli_config, certname)
+ except errors.CertStorageError:
+ return None
try:
return storage.RenewableCert(renewal_file, cli_config)
except (errors.CertStorageError, IOError):
| {"golden_diff": "diff --git a/certbot/cert_manager.py b/certbot/cert_manager.py\n--- a/certbot/cert_manager.py\n+++ b/certbot/cert_manager.py\n@@ -100,7 +100,10 @@\n configs_dir = cli_config.renewal_configs_dir\n # Verify the directory is there\n util.make_or_verify_dir(configs_dir, mode=0o755, uid=os.geteuid())\n- renewal_file = storage.renewal_file_for_certname(cli_config, certname)\n+ try:\n+ renewal_file = storage.renewal_file_for_certname(cli_config, certname)\n+ except errors.CertStorageError:\n+ return None\n try:\n return storage.RenewableCert(renewal_file, cli_config)\n except (errors.CertStorageError, IOError):\n", "issue": "--cert-name option not honored on new SSL certificate issuance\nHello,\r\n\r\n```shell\r\nroot@stretch:~# certbot-auto --version\r\ncertbot 0.11.1\r\n```\r\n```shell\r\n# certbot-auto \\\r\ncertonly \\\r\n--non-interactive \\\r\n--agree-tos --email <email> \\\r\n--staging \\\r\n--webroot --webroot-path <path> \\\r\n--allow-subset-of-names \\\r\n--domain unresolvable.domain.tld \\\r\n--domain resolvable.domain.tld \\\r\n--cert-name unresolvable.domain.tld\r\n```\r\nResult in:\r\n```shell\r\nNo certificate found with name unresolvable.domain.tld (expected /etc/letsencrypt/renewal/unresolvable.domain.tld.conf).\r\n```\r\nBut help say (When creating a new certificate ...):\r\n```\r\n--cert-name CERTNAME Certificate name to apply. Only one certificate name\r\n can be used per Certbot run. To see certificate names,\r\n run 'certbot certificates'. When creating a new\r\n certificate, specifies the new certificate's name.\r\n (default: None)\r\n```\r\n\r\nThank you.\n", "before_files": [{"content": "\"\"\"Tools for managing certificates.\"\"\"\nimport datetime\nimport logging\nimport os\nimport pytz\nimport traceback\nimport zope.component\n\nfrom certbot import errors\nfrom certbot import interfaces\nfrom certbot import ocsp\nfrom certbot import storage\nfrom certbot import util\n\nfrom certbot.display import util as display_util\n\nlogger = logging.getLogger(__name__)\n\n###################\n# Commands\n###################\n\ndef update_live_symlinks(config):\n \"\"\"Update the certificate file family symlinks to use archive_dir.\n\n Use the information in the config file to make symlinks point to\n the correct archive directory.\n\n .. 
note:: This assumes that the installation is using a Reverter object.\n\n :param config: Configuration.\n :type config: :class:`certbot.configuration.NamespaceConfig`\n\n \"\"\"\n for renewal_file in storage.renewal_conf_files(config):\n storage.RenewableCert(renewal_file, config, update_symlinks=True)\n\ndef rename_lineage(config):\n \"\"\"Rename the specified lineage to the new name.\n\n :param config: Configuration.\n :type config: :class:`certbot.configuration.NamespaceConfig`\n\n \"\"\"\n disp = zope.component.getUtility(interfaces.IDisplay)\n\n certname = _get_certname(config, \"rename\")\n\n new_certname = config.new_certname\n if not new_certname:\n code, new_certname = disp.input(\n \"Enter the new name for certificate {0}\".format(certname),\n flag=\"--updated-cert-name\", force_interactive=True)\n if code != display_util.OK or not new_certname:\n raise errors.Error(\"User ended interaction.\")\n\n lineage = lineage_for_certname(config, certname)\n if not lineage:\n raise errors.ConfigurationError(\"No existing certificate with name \"\n \"{0} found.\".format(certname))\n storage.rename_renewal_config(certname, new_certname, config)\n disp.notification(\"Successfully renamed {0} to {1}.\"\n .format(certname, new_certname), pause=False)\n\ndef certificates(config):\n \"\"\"Display information about certs configured with Certbot\n\n :param config: Configuration.\n :type config: :class:`certbot.configuration.NamespaceConfig`\n \"\"\"\n parsed_certs = []\n parse_failures = []\n for renewal_file in storage.renewal_conf_files(config):\n try:\n renewal_candidate = storage.RenewableCert(renewal_file, config)\n parsed_certs.append(renewal_candidate)\n except Exception as e: # pylint: disable=broad-except\n logger.warning(\"Renewal configuration file %s produced an \"\n \"unexpected error: %s. 
Skipping.\", renewal_file, e)\n logger.debug(\"Traceback was:\\n%s\", traceback.format_exc())\n parse_failures.append(renewal_file)\n\n # Describe all the certs\n _describe_certs(config, parsed_certs, parse_failures)\n\ndef delete(config):\n \"\"\"Delete Certbot files associated with a certificate lineage.\"\"\"\n certname = _get_certname(config, \"delete\")\n storage.delete_files(config, certname)\n disp = zope.component.getUtility(interfaces.IDisplay)\n disp.notification(\"Deleted all files relating to certificate {0}.\"\n .format(certname), pause=False)\n\n###################\n# Public Helpers\n###################\n\ndef lineage_for_certname(cli_config, certname):\n \"\"\"Find a lineage object with name certname.\"\"\"\n configs_dir = cli_config.renewal_configs_dir\n # Verify the directory is there\n util.make_or_verify_dir(configs_dir, mode=0o755, uid=os.geteuid())\n renewal_file = storage.renewal_file_for_certname(cli_config, certname)\n try:\n return storage.RenewableCert(renewal_file, cli_config)\n except (errors.CertStorageError, IOError):\n logger.debug(\"Renewal conf file %s is broken.\", renewal_file)\n logger.debug(\"Traceback was:\\n%s\", traceback.format_exc())\n return None\n\ndef domains_for_certname(config, certname):\n \"\"\"Find the domains in the cert with name certname.\"\"\"\n lineage = lineage_for_certname(config, certname)\n return lineage.names() if lineage else None\n\ndef find_duplicative_certs(config, domains):\n \"\"\"Find existing certs that duplicate the request.\"\"\"\n def update_certs_for_domain_matches(candidate_lineage, rv):\n \"\"\"Return cert as identical_names_cert if it matches,\n or subset_names_cert if it matches as subset\n \"\"\"\n # TODO: Handle these differently depending on whether they are\n # expired or still valid?\n identical_names_cert, subset_names_cert = rv\n candidate_names = set(candidate_lineage.names())\n if candidate_names == set(domains):\n identical_names_cert = candidate_lineage\n elif candidate_names.issubset(set(domains)):\n # This logic finds and returns the largest subset-names cert\n # in the case where there are several available.\n if subset_names_cert is None:\n subset_names_cert = candidate_lineage\n elif len(candidate_names) > len(subset_names_cert.names()):\n subset_names_cert = candidate_lineage\n return (identical_names_cert, subset_names_cert)\n\n return _search_lineages(config, update_certs_for_domain_matches, (None, None))\n\n\n###################\n# Private Helpers\n###################\n\ndef _get_certname(config, verb):\n \"\"\"Get certname from flag, interactively, or error out.\n \"\"\"\n certname = config.certname\n if not certname:\n disp = zope.component.getUtility(interfaces.IDisplay)\n filenames = storage.renewal_conf_files(config)\n choices = [storage.lineagename_for_filename(name) for name in filenames]\n if not choices:\n raise errors.Error(\"No existing certificates found.\")\n code, index = disp.menu(\"Which certificate would you like to {0}?\".format(verb),\n choices, ok_label=\"Select\", flag=\"--cert-name\",\n force_interactive=True)\n if code != display_util.OK or not index in range(0, len(choices)):\n raise errors.Error(\"User ended interaction.\")\n certname = choices[index]\n return certname\n\ndef _report_lines(msgs):\n \"\"\"Format a results report for a category of single-line renewal outcomes\"\"\"\n return \" \" + \"\\n \".join(str(msg) for msg in msgs)\n\ndef _report_human_readable(config, parsed_certs):\n \"\"\"Format a results report for a parsed cert\"\"\"\n certinfo = []\n checker = 
ocsp.RevocationChecker()\n for cert in parsed_certs:\n if config.certname and cert.lineagename != config.certname:\n continue\n if config.domains and not set(config.domains).issubset(cert.names()):\n continue\n now = pytz.UTC.fromutc(datetime.datetime.utcnow())\n\n reasons = []\n if cert.is_test_cert:\n reasons.append('TEST_CERT')\n if cert.target_expiry <= now:\n reasons.append('EXPIRED')\n if checker.ocsp_revoked(cert.cert, cert.chain):\n reasons.append('REVOKED')\n\n if reasons:\n status = \"INVALID: \" + \", \".join(reasons)\n else:\n diff = cert.target_expiry - now\n if diff.days == 1:\n status = \"VALID: 1 day\"\n elif diff.days < 1:\n status = \"VALID: {0} hour(s)\".format(diff.seconds // 3600)\n else:\n status = \"VALID: {0} days\".format(diff.days)\n\n valid_string = \"{0} ({1})\".format(cert.target_expiry, status)\n certinfo.append(\" Certificate Name: {0}\\n\"\n \" Domains: {1}\\n\"\n \" Expiry Date: {2}\\n\"\n \" Certificate Path: {3}\\n\"\n \" Private Key Path: {4}\".format(\n cert.lineagename,\n \" \".join(cert.names()),\n valid_string,\n cert.fullchain,\n cert.privkey))\n return \"\\n\".join(certinfo)\n\ndef _describe_certs(config, parsed_certs, parse_failures):\n \"\"\"Print information about the certs we know about\"\"\"\n out = []\n\n notify = out.append\n\n if not parsed_certs and not parse_failures:\n notify(\"No certs found.\")\n else:\n if parsed_certs:\n match = \"matching \" if config.certname or config.domains else \"\"\n notify(\"Found the following {0}certs:\".format(match))\n notify(_report_human_readable(config, parsed_certs))\n if parse_failures:\n notify(\"\\nThe following renewal configuration files \"\n \"were invalid:\")\n notify(_report_lines(parse_failures))\n\n disp = zope.component.getUtility(interfaces.IDisplay)\n disp.notification(\"\\n\".join(out), pause=False, wrap=False)\n\ndef _search_lineages(cli_config, func, initial_rv):\n \"\"\"Iterate func over unbroken lineages, allowing custom return conditions.\n\n Allows flexible customization of return values, including multiple\n return values and complex checks.\n \"\"\"\n configs_dir = cli_config.renewal_configs_dir\n # Verify the directory is there\n util.make_or_verify_dir(configs_dir, mode=0o755, uid=os.geteuid())\n\n rv = initial_rv\n for renewal_file in storage.renewal_conf_files(cli_config):\n try:\n candidate_lineage = storage.RenewableCert(renewal_file, cli_config)\n except (errors.CertStorageError, IOError):\n logger.debug(\"Renewal conf file %s is broken. Skipping.\", renewal_file)\n logger.debug(\"Traceback was:\\n%s\", traceback.format_exc())\n continue\n rv = func(candidate_lineage, rv)\n return rv\n", "path": "certbot/cert_manager.py"}]} | 3,511 | 185 |
gh_patches_debug_42192 | rasdani/github-patches | git_diff | opsdroid__opsdroid-692 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Change the way opsdroid prioritize regex / NLU skills
# Description
Currently opsdroid uses a score system to decide which skill has to run when more than one skill matches a message.
This score system is designed to prioritize NLU engines over regex (see #311).
The problem comes when you're working with an NLU engine and some regex skills at the same time. If the regex skill shares some words with an NLU intent, it will never be executed, even if you write the message exactly as in the regex.
## Steps to Reproduce
For example, you add a greetings intent to some NLU training with various sentences like 'hello', 'hello opsdroid', 'hi', 'hello there', 'whats up', 'good morning'...
And you have the min-score at 0.6.
Then you have a regex matcher with something like 'hello from (?P\w+)!'.
If a user writes "hello from Barcelona!", opsdroid will prioritize the NLU response, because it's almost certain that the NLU platform will detect the intent in the sentence.
## Expected Functionality
If a developer has the regex 'hello from (?P\w+)!' and it matches, it has to be prioritized over the NLU intent, because the regex is more specific.
## Proposed solution
My proposal is to redesign the priority system by specificity. So I would prioritize skills in this order:
_parse (#450) > regex > NLU engines_
And, to decide which skill to execute if more than one matches at the same level:
- In parse and regex, use the expression length. A longer expression is normally more specific.
- In NLU engines, use the engine score (like now)
Any thought or discussion will be welcome :)
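
As a minimal sketch of the length-based idea (helper name and default factor are illustrative, not a final design):

```python
def regex_score(expression, score_factor=0.6):
    # The score asymptotically approaches score_factor as the
    # expression gets longer, so more specific patterns rank higher.
    return (1 - (1 / ((len(expression) + 1) ** 2))) * score_factor


print(regex_score("hi"))                  # short, generic pattern -> lower score
print(regex_score(r"hello from (\w+)!"))  # longer, more specific -> higher score
```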
</issue>
<code>
[start of opsdroid/parsers/regex.py]
1 """A helper function for parsing and executing regex skills."""
2
3 import logging
4 import re
5
6 from opsdroid.const import REGEX_MAX_SCORE
7
8
9 _LOGGER = logging.getLogger(__name__)
10
11
12 async def calculate_score(regex):
13 """Calculate the score of a regex."""
14 # The score asymptotically approaches the max score
15 # based on the length of the expression.
16 return (1 - (1 / ((len(regex) + 1) ** 2))) * REGEX_MAX_SCORE
17
18
19 async def parse_regex(opsdroid, message):
20 """Parse a message against all regex skills."""
21 matched_skills = []
22 for skill in opsdroid.skills:
23 if "regex" in skill:
24 if skill["regex"]["case_sensitive"]:
25 regex = re.search(skill["regex"]["expression"],
26 message.text)
27 else:
28 regex = re.search(skill["regex"]["expression"],
29 message.text, re.IGNORECASE)
30 if regex:
31 message.regex = regex
32 matched_skills.append({
33 "score": await calculate_score(
34 skill["regex"]["expression"]),
35 "skill": skill["skill"],
36 "config": skill["config"],
37 "message": message
38 })
39 return matched_skills
40
[end of opsdroid/parsers/regex.py]
[start of opsdroid/const.py]
1 """Constants used by OpsDroid."""
2 import os
3 from appdirs import user_log_dir, user_config_dir, user_data_dir
4 from opsdroid import __version__ # noqa # pylint: disable=unused-import
5
6 NAME = 'opsdroid'
7 DEFAULT_GIT_URL = "https://github.com/opsdroid/"
8 MODULES_DIRECTORY = "opsdroid-modules"
9 DEFAULT_ROOT_PATH = user_data_dir(NAME)
10 DEFAULT_LOG_FILENAME = os.path.join(
11 user_log_dir(NAME, appauthor=False), 'output.log')
12 DEFAULT_MODULES_PATH = user_data_dir(NAME, MODULES_DIRECTORY)
13 DEFAULT_MODULE_DEPS_PATH = os.path.join(
14 user_data_dir(NAME, MODULES_DIRECTORY), "site-packages")
15 DEFAULT_CONFIG_PATH = os.path.join(
16 user_config_dir(NAME, appauthor=False), "configuration.yaml")
17 PRE_0_12_0_CONFIG_PATH = os.path.join(DEFAULT_ROOT_PATH, "configuration.yaml")
18 PRE_0_12_0_ROOT_PATH = os.path.expanduser("~/.opsdroid")
19 DEFAULT_MODULE_BRANCH = "master"
20 DEFAULT_LANGUAGE = 'en'
21 LOCALE_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'locale')
22 EXAMPLE_CONFIG_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),
23 "configuration/example_configuration.yaml")
24 REGEX_MAX_SCORE = 0.6
25
26 RASANLU_DEFAULT_URL = "http://localhost:5000"
27 RASANLU_DEFAULT_PROJECT = "opsdroid"
28
29 LUISAI_DEFAULT_URL = "https://westus.api.cognitive.microsoft.com" \
30 "/luis/v2.0/apps/"
31
32 DIALOGFLOW_API_ENDPOINT = "https://api.dialogflow.com/v1/query"
33 DIALOGFLOW_API_VERSION = "20150910"
34
35 WITAI_DEFAULT_VERSION = "20170307"
36 WITAI_API_ENDPOINT = "https://api.wit.ai/message?"
37
38 RECASTAI_API_ENDPOINT = "https://api.recast.ai/v2/request"
39
[end of opsdroid/const.py]
[start of opsdroid/matchers.py]
1 """Decorator functions to use when creating skill modules."""
2
3 import logging
4
5 from opsdroid.helper import get_opsdroid
6 from opsdroid.web import Web
7
8
9 _LOGGER = logging.getLogger(__name__)
10
11
12 def match_regex(regex, case_sensitive=True):
13 """Return regex match decorator."""
14 def matcher(func):
15 """Add decorated function to skills list for regex matching."""
16 opsdroid = get_opsdroid()
17 if opsdroid:
18 config = opsdroid.loader.current_import_config
19 regex_setup = {
20 "expression": regex,
21 "case_sensitive": case_sensitive
22 }
23 opsdroid.skills.append({"regex": regex_setup,
24 "skill": func,
25 "config": config})
26 return func
27 return matcher
28
29
30 def match_apiai_action(action):
31 """Return Dialogflow action match decorator."""
32 def matcher(func):
33 """Add decorated function to skills list for Dialogflow matching."""
34 opsdroid = get_opsdroid()
35 if opsdroid:
36 config = opsdroid.loader.current_import_config
37 opsdroid.skills.append({"dialogflow_action": action,
38 "skill": func,
39 "config": config})
40 return func
41 _LOGGER.warning(_("Api.ai is now called Dialogflow, this matcher "
42 "will stop working in the future. "
43 "Use match_dialogflow_action instead."))
44 return matcher
45
46
47 def match_apiai_intent(intent):
48 """Return Dialogflow intent match decorator."""
49 def matcher(func):
50 """Add decorated function to skills list for Dialogflow matching."""
51 opsdroid = get_opsdroid()
52 if opsdroid:
53 config = opsdroid.loader.current_import_config
54 opsdroid.skills.append({"dialogflow_intent": intent,
55 "skill": func,
56 "config": config})
57 return func
58 _LOGGER.warning(_("Api.ai is now called Dialogflow, this matcher "
59 "will stop working in the future. "
60 "Use match_dialogflow_intent instead."))
61 return matcher
62
63
64 def match_dialogflow_action(action):
65 """Return Dialogflowi action match decorator."""
66 def matcher(func):
67 """Add decorated function to skills list for Dialogflow matching."""
68 opsdroid = get_opsdroid()
69 if opsdroid:
70 config = opsdroid.loader.current_import_config
71 opsdroid.skills.append({"dialogflow_action": action,
72 "skill": func,
73 "config": config})
74 return func
75 return matcher
76
77
78 def match_dialogflow_intent(intent):
79 """Return Dialogflow intent match decorator."""
80 def matcher(func):
81 """Add decorated function to skills list for Dialogflow matching."""
82 opsdroid = get_opsdroid()
83 if opsdroid:
84 config = opsdroid.loader.current_import_config
85 opsdroid.skills.append({"dialogflow_intent": intent,
86 "skill": func,
87 "config": config})
88 return func
89 return matcher
90
91
92 def match_luisai_intent(intent):
93 """Return luisai intent match decorator."""
94 def matcher(func):
95 """Add decorated function to skills list for luisai matching."""
96 opsdroid = get_opsdroid()
97 if opsdroid:
98 config = opsdroid.loader.current_import_config
99 opsdroid.skills.append({"luisai_intent": intent,
100 "skill": func,
101 "config": config})
102 return func
103 return matcher
104
105
106 def match_rasanlu(intent):
107 """Return Rasa NLU intent match decorator."""
108 def matcher(func):
109 """Add decorated function to skills list for Rasa NLU matching."""
110 opsdroid = get_opsdroid()
111 if opsdroid:
112 config = opsdroid.loader.current_import_config
113 opsdroid.skills.append({"rasanlu_intent": intent,
114 "skill": func,
115 "config": config})
116 return func
117 return matcher
118
119
120 def match_recastai(intent):
121 """Return recastai intent match decorator."""
122 def matcher(func):
123 """Add decorated function to skills list for recastai matching."""
124 opsdroid = get_opsdroid()
125 if opsdroid:
126 config = opsdroid.loader.current_import_config
127 opsdroid.skills.append({"recastai_intent": intent,
128 "skill": func,
129 "config": config})
130 return func
131 return matcher
132
133
134 def match_witai(intent):
135 """Return witai intent match decorator."""
136 def matcher(func):
137 """Add decorated function to skills list for witai matching."""
138 opsdroid = get_opsdroid()
139 if opsdroid:
140 config = opsdroid.loader.current_import_config
141 opsdroid.skills.append({"witai_intent": intent,
142 "skill": func,
143 "config": config})
144 return func
145 return matcher
146
147
148 def match_crontab(crontab, timezone=None):
149 """Return crontab match decorator."""
150 def matcher(func):
151 """Add decorated function to skills list for crontab matching."""
152 opsdroid = get_opsdroid()
153 if opsdroid:
154 config = opsdroid.loader.current_import_config
155 opsdroid.skills.append({"crontab": crontab,
156 "skill": func,
157 "config": config,
158 "timezone": timezone})
159 return func
160 return matcher
161
162
163 def match_webhook(webhook):
164 """Return webhook match decorator."""
165 def matcher(func):
166 """Add decorated function to skills list for webhook matching."""
167 opsdroid = get_opsdroid()
168 if opsdroid:
169 config = opsdroid.loader.current_import_config
170 opsdroid.skills.append({"webhook": webhook,
171 "skill": func,
172 "config": config})
173
174 async def wrapper(req, opsdroid=opsdroid, config=config):
175 """Wrap up the aiohttp handler."""
176 _LOGGER.info("Running skill %s via webhook", webhook)
177 opsdroid.stats["webhooks_called"] = \
178 opsdroid.stats["webhooks_called"] + 1
179 await func(opsdroid, config, req)
180 return Web.build_response(200, {"called_skill": webhook})
181
182 opsdroid.web_server.web_app.router.add_post(
183 "/skill/{}/{}".format(config["name"], webhook), wrapper)
184 opsdroid.web_server.web_app.router.add_post(
185 "/skill/{}/{}/".format(config["name"], webhook), wrapper)
186
187 return func
188 return matcher
189
190
191 def match_always(func=None):
192 """Return always match decorator."""
193 def matcher(func):
194 """Add decorated function to skills list for always matching."""
195 opsdroid = get_opsdroid()
196 if opsdroid:
197 config = opsdroid.loader.current_import_config
198 opsdroid.skills.append({"always": True,
199 "skill": func,
200 "config": config})
201 return func
202
203 # Allow for decorator with or without parenthesis as there are no args.
204 if callable(func):
205 return matcher(func)
206 return matcher
207
[end of opsdroid/matchers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/opsdroid/const.py b/opsdroid/const.py
--- a/opsdroid/const.py
+++ b/opsdroid/const.py
@@ -21,7 +21,7 @@
LOCALE_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'locale')
EXAMPLE_CONFIG_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),
"configuration/example_configuration.yaml")
-REGEX_MAX_SCORE = 0.6
+REGEX_SCORE_FACTOR = 0.6
RASANLU_DEFAULT_URL = "http://localhost:5000"
RASANLU_DEFAULT_PROJECT = "opsdroid"
diff --git a/opsdroid/matchers.py b/opsdroid/matchers.py
--- a/opsdroid/matchers.py
+++ b/opsdroid/matchers.py
@@ -2,6 +2,7 @@
import logging
+from opsdroid.const import REGEX_SCORE_FACTOR
from opsdroid.helper import get_opsdroid
from opsdroid.web import Web
@@ -9,7 +10,7 @@
_LOGGER = logging.getLogger(__name__)
-def match_regex(regex, case_sensitive=True):
+def match_regex(regex, case_sensitive=True, score_factor=None):
"""Return regex match decorator."""
def matcher(func):
"""Add decorated function to skills list for regex matching."""
@@ -18,7 +19,8 @@
config = opsdroid.loader.current_import_config
regex_setup = {
"expression": regex,
- "case_sensitive": case_sensitive
+ "case_sensitive": case_sensitive,
+ "score_factor": score_factor or REGEX_SCORE_FACTOR,
}
opsdroid.skills.append({"regex": regex_setup,
"skill": func,
diff --git a/opsdroid/parsers/regex.py b/opsdroid/parsers/regex.py
--- a/opsdroid/parsers/regex.py
+++ b/opsdroid/parsers/regex.py
@@ -3,17 +3,14 @@
import logging
import re
-from opsdroid.const import REGEX_MAX_SCORE
-
-
_LOGGER = logging.getLogger(__name__)
-async def calculate_score(regex):
+async def calculate_score(regex, score_factor):
"""Calculate the score of a regex."""
# The score asymptotically approaches the max score
# based on the length of the expression.
- return (1 - (1 / ((len(regex) + 1) ** 2))) * REGEX_MAX_SCORE
+ return (1 - (1 / ((len(regex) + 1) ** 2))) * score_factor
async def parse_regex(opsdroid, message):
@@ -21,17 +18,18 @@
matched_skills = []
for skill in opsdroid.skills:
if "regex" in skill:
- if skill["regex"]["case_sensitive"]:
- regex = re.search(skill["regex"]["expression"],
+ opts = skill["regex"]
+ if opts["case_sensitive"]:
+ regex = re.search(opts["expression"],
message.text)
else:
- regex = re.search(skill["regex"]["expression"],
+ regex = re.search(opts["expression"],
message.text, re.IGNORECASE)
if regex:
message.regex = regex
matched_skills.append({
"score": await calculate_score(
- skill["regex"]["expression"]),
+ opts["expression"], opts["score_factor"]),
"skill": skill["skill"],
"config": skill["config"],
"message": message
| {"golden_diff": "diff --git a/opsdroid/const.py b/opsdroid/const.py\n--- a/opsdroid/const.py\n+++ b/opsdroid/const.py\n@@ -21,7 +21,7 @@\n LOCALE_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'locale')\n EXAMPLE_CONFIG_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n \"configuration/example_configuration.yaml\")\n-REGEX_MAX_SCORE = 0.6\n+REGEX_SCORE_FACTOR = 0.6\n \n RASANLU_DEFAULT_URL = \"http://localhost:5000\"\n RASANLU_DEFAULT_PROJECT = \"opsdroid\"\ndiff --git a/opsdroid/matchers.py b/opsdroid/matchers.py\n--- a/opsdroid/matchers.py\n+++ b/opsdroid/matchers.py\n@@ -2,6 +2,7 @@\n \n import logging\n \n+from opsdroid.const import REGEX_SCORE_FACTOR\n from opsdroid.helper import get_opsdroid\n from opsdroid.web import Web\n \n@@ -9,7 +10,7 @@\n _LOGGER = logging.getLogger(__name__)\n \n \n-def match_regex(regex, case_sensitive=True):\n+def match_regex(regex, case_sensitive=True, score_factor=None):\n \"\"\"Return regex match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for regex matching.\"\"\"\n@@ -18,7 +19,8 @@\n config = opsdroid.loader.current_import_config\n regex_setup = {\n \"expression\": regex,\n- \"case_sensitive\": case_sensitive\n+ \"case_sensitive\": case_sensitive,\n+ \"score_factor\": score_factor or REGEX_SCORE_FACTOR,\n }\n opsdroid.skills.append({\"regex\": regex_setup,\n \"skill\": func,\ndiff --git a/opsdroid/parsers/regex.py b/opsdroid/parsers/regex.py\n--- a/opsdroid/parsers/regex.py\n+++ b/opsdroid/parsers/regex.py\n@@ -3,17 +3,14 @@\n import logging\n import re\n \n-from opsdroid.const import REGEX_MAX_SCORE\n-\n-\n _LOGGER = logging.getLogger(__name__)\n \n \n-async def calculate_score(regex):\n+async def calculate_score(regex, score_factor):\n \"\"\"Calculate the score of a regex.\"\"\"\n # The score asymptotically approaches the max score\n # based on the length of the expression.\n- return (1 - (1 / ((len(regex) + 1) ** 2))) * REGEX_MAX_SCORE\n+ return (1 - (1 / ((len(regex) + 1) ** 2))) * score_factor\n \n \n async def parse_regex(opsdroid, message):\n@@ -21,17 +18,18 @@\n matched_skills = []\n for skill in opsdroid.skills:\n if \"regex\" in skill:\n- if skill[\"regex\"][\"case_sensitive\"]:\n- regex = re.search(skill[\"regex\"][\"expression\"],\n+ opts = skill[\"regex\"]\n+ if opts[\"case_sensitive\"]:\n+ regex = re.search(opts[\"expression\"],\n message.text)\n else:\n- regex = re.search(skill[\"regex\"][\"expression\"],\n+ regex = re.search(opts[\"expression\"],\n message.text, re.IGNORECASE)\n if regex:\n message.regex = regex\n matched_skills.append({\n \"score\": await calculate_score(\n- skill[\"regex\"][\"expression\"]),\n+ opts[\"expression\"], opts[\"score_factor\"]),\n \"skill\": skill[\"skill\"],\n \"config\": skill[\"config\"],\n \"message\": message\n", "issue": "Change the way opsdroid prioritize regex / NLU skills\n# Description\r\nActually opsdroid use a score system to decide what skill have to run when more than one skills match a message.\r\nThis score system is designed to prioritize NLU engines over regex (see #311).\r\n\r\nThe problem comes when you're working at the same time with some NLU engine and some regex skills. 
If the regex skill have some words of some NLU intent, it will be never executed, even though you write it exactly as in the regex.\r\n\r\n## Steps to Reproduce\r\nFor example, you add a greetings intent to some NLU training with various sentences like 'hello', 'hello opsdroid', 'hi', 'hello there', 'whats up', 'good morning'...\r\nAnd you have the min-score at 0.6.\r\n\r\nThen you have a regex matcher with something like 'hello from (?P\\w+)!'.\r\n\r\nIf a user writes \"hello from Barcelona!\" opsdroid will prioritize the NLU response, because it's almost secure that the NLU platform will detect the intent into the sentence.\r\n\r\n## Expected Functionality\r\nIf a developer has the regex 'hello from (?P\\w+)!' and it matches, it has to be prioritized over the NLU intent, because regex it's more specific.\r\n\r\n## Proposed solution\r\nMy proposal is redesign the priority system by specificity. So I will prioritize skills in that order:\r\n_parse (#450) > regex > NLU engines_\r\n\r\nAnd, to decide what skill to execute if more than one match in the same level:\r\n- In parse and regex, use the expression length. Longer expression normally it's more specific.\r\n- In NLU engines, use the engine score (like now)\r\n\r\nAny thought or discussion will be welcome :)\n", "before_files": [{"content": "\"\"\"A helper function for parsing and executing regex skills.\"\"\"\n\nimport logging\nimport re\n\nfrom opsdroid.const import REGEX_MAX_SCORE\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def calculate_score(regex):\n \"\"\"Calculate the score of a regex.\"\"\"\n # The score asymptotically approaches the max score\n # based on the length of the expression.\n return (1 - (1 / ((len(regex) + 1) ** 2))) * REGEX_MAX_SCORE\n\n\nasync def parse_regex(opsdroid, message):\n \"\"\"Parse a message against all regex skills.\"\"\"\n matched_skills = []\n for skill in opsdroid.skills:\n if \"regex\" in skill:\n if skill[\"regex\"][\"case_sensitive\"]:\n regex = re.search(skill[\"regex\"][\"expression\"],\n message.text)\n else:\n regex = re.search(skill[\"regex\"][\"expression\"],\n message.text, re.IGNORECASE)\n if regex:\n message.regex = regex\n matched_skills.append({\n \"score\": await calculate_score(\n skill[\"regex\"][\"expression\"]),\n \"skill\": skill[\"skill\"],\n \"config\": skill[\"config\"],\n \"message\": message\n })\n return matched_skills\n", "path": "opsdroid/parsers/regex.py"}, {"content": "\"\"\"Constants used by OpsDroid.\"\"\"\nimport os\nfrom appdirs import user_log_dir, user_config_dir, user_data_dir\nfrom opsdroid import __version__ # noqa # pylint: disable=unused-import\n\nNAME = 'opsdroid'\nDEFAULT_GIT_URL = \"https://github.com/opsdroid/\"\nMODULES_DIRECTORY = \"opsdroid-modules\"\nDEFAULT_ROOT_PATH = user_data_dir(NAME)\nDEFAULT_LOG_FILENAME = os.path.join(\n user_log_dir(NAME, appauthor=False), 'output.log')\nDEFAULT_MODULES_PATH = user_data_dir(NAME, MODULES_DIRECTORY)\nDEFAULT_MODULE_DEPS_PATH = os.path.join(\n user_data_dir(NAME, MODULES_DIRECTORY), \"site-packages\")\nDEFAULT_CONFIG_PATH = os.path.join(\n user_config_dir(NAME, appauthor=False), \"configuration.yaml\")\nPRE_0_12_0_CONFIG_PATH = os.path.join(DEFAULT_ROOT_PATH, \"configuration.yaml\")\nPRE_0_12_0_ROOT_PATH = os.path.expanduser(\"~/.opsdroid\")\nDEFAULT_MODULE_BRANCH = \"master\"\nDEFAULT_LANGUAGE = 'en'\nLOCALE_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'locale')\nEXAMPLE_CONFIG_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n 
\"configuration/example_configuration.yaml\")\nREGEX_MAX_SCORE = 0.6\n\nRASANLU_DEFAULT_URL = \"http://localhost:5000\"\nRASANLU_DEFAULT_PROJECT = \"opsdroid\"\n\nLUISAI_DEFAULT_URL = \"https://westus.api.cognitive.microsoft.com\" \\\n \"/luis/v2.0/apps/\"\n\nDIALOGFLOW_API_ENDPOINT = \"https://api.dialogflow.com/v1/query\"\nDIALOGFLOW_API_VERSION = \"20150910\"\n\nWITAI_DEFAULT_VERSION = \"20170307\"\nWITAI_API_ENDPOINT = \"https://api.wit.ai/message?\"\n\nRECASTAI_API_ENDPOINT = \"https://api.recast.ai/v2/request\"\n", "path": "opsdroid/const.py"}, {"content": "\"\"\"Decorator functions to use when creating skill modules.\"\"\"\n\nimport logging\n\nfrom opsdroid.helper import get_opsdroid\nfrom opsdroid.web import Web\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\ndef match_regex(regex, case_sensitive=True):\n \"\"\"Return regex match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for regex matching.\"\"\"\n opsdroid = get_opsdroid()\n if opsdroid:\n config = opsdroid.loader.current_import_config\n regex_setup = {\n \"expression\": regex,\n \"case_sensitive\": case_sensitive\n }\n opsdroid.skills.append({\"regex\": regex_setup,\n \"skill\": func,\n \"config\": config})\n return func\n return matcher\n\n\ndef match_apiai_action(action):\n \"\"\"Return Dialogflow action match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for Dialogflow matching.\"\"\"\n opsdroid = get_opsdroid()\n if opsdroid:\n config = opsdroid.loader.current_import_config\n opsdroid.skills.append({\"dialogflow_action\": action,\n \"skill\": func,\n \"config\": config})\n return func\n _LOGGER.warning(_(\"Api.ai is now called Dialogflow, this matcher \"\n \"will stop working in the future. \"\n \"Use match_dialogflow_action instead.\"))\n return matcher\n\n\ndef match_apiai_intent(intent):\n \"\"\"Return Dialogflow intent match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for Dialogflow matching.\"\"\"\n opsdroid = get_opsdroid()\n if opsdroid:\n config = opsdroid.loader.current_import_config\n opsdroid.skills.append({\"dialogflow_intent\": intent,\n \"skill\": func,\n \"config\": config})\n return func\n _LOGGER.warning(_(\"Api.ai is now called Dialogflow, this matcher \"\n \"will stop working in the future. 
\"\n \"Use match_dialogflow_intent instead.\"))\n return matcher\n\n\ndef match_dialogflow_action(action):\n \"\"\"Return Dialogflowi action match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for Dialogflow matching.\"\"\"\n opsdroid = get_opsdroid()\n if opsdroid:\n config = opsdroid.loader.current_import_config\n opsdroid.skills.append({\"dialogflow_action\": action,\n \"skill\": func,\n \"config\": config})\n return func\n return matcher\n\n\ndef match_dialogflow_intent(intent):\n \"\"\"Return Dialogflow intent match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for Dialogflow matching.\"\"\"\n opsdroid = get_opsdroid()\n if opsdroid:\n config = opsdroid.loader.current_import_config\n opsdroid.skills.append({\"dialogflow_intent\": intent,\n \"skill\": func,\n \"config\": config})\n return func\n return matcher\n\n\ndef match_luisai_intent(intent):\n \"\"\"Return luisai intent match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for luisai matching.\"\"\"\n opsdroid = get_opsdroid()\n if opsdroid:\n config = opsdroid.loader.current_import_config\n opsdroid.skills.append({\"luisai_intent\": intent,\n \"skill\": func,\n \"config\": config})\n return func\n return matcher\n\n\ndef match_rasanlu(intent):\n \"\"\"Return Rasa NLU intent match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for Rasa NLU matching.\"\"\"\n opsdroid = get_opsdroid()\n if opsdroid:\n config = opsdroid.loader.current_import_config\n opsdroid.skills.append({\"rasanlu_intent\": intent,\n \"skill\": func,\n \"config\": config})\n return func\n return matcher\n\n\ndef match_recastai(intent):\n \"\"\"Return recastai intent match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for recastai matching.\"\"\"\n opsdroid = get_opsdroid()\n if opsdroid:\n config = opsdroid.loader.current_import_config\n opsdroid.skills.append({\"recastai_intent\": intent,\n \"skill\": func,\n \"config\": config})\n return func\n return matcher\n\n\ndef match_witai(intent):\n \"\"\"Return witai intent match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for witai matching.\"\"\"\n opsdroid = get_opsdroid()\n if opsdroid:\n config = opsdroid.loader.current_import_config\n opsdroid.skills.append({\"witai_intent\": intent,\n \"skill\": func,\n \"config\": config})\n return func\n return matcher\n\n\ndef match_crontab(crontab, timezone=None):\n \"\"\"Return crontab match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for crontab matching.\"\"\"\n opsdroid = get_opsdroid()\n if opsdroid:\n config = opsdroid.loader.current_import_config\n opsdroid.skills.append({\"crontab\": crontab,\n \"skill\": func,\n \"config\": config,\n \"timezone\": timezone})\n return func\n return matcher\n\n\ndef match_webhook(webhook):\n \"\"\"Return webhook match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for webhook matching.\"\"\"\n opsdroid = get_opsdroid()\n if opsdroid:\n config = opsdroid.loader.current_import_config\n opsdroid.skills.append({\"webhook\": webhook,\n \"skill\": func,\n \"config\": config})\n\n async def wrapper(req, opsdroid=opsdroid, config=config):\n \"\"\"Wrap up the aiohttp handler.\"\"\"\n _LOGGER.info(\"Running skill %s via webhook\", webhook)\n opsdroid.stats[\"webhooks_called\"] = \\\n opsdroid.stats[\"webhooks_called\"] + 1\n await func(opsdroid, 
config, req)\n return Web.build_response(200, {\"called_skill\": webhook})\n\n opsdroid.web_server.web_app.router.add_post(\n \"/skill/{}/{}\".format(config[\"name\"], webhook), wrapper)\n opsdroid.web_server.web_app.router.add_post(\n \"/skill/{}/{}/\".format(config[\"name\"], webhook), wrapper)\n\n return func\n return matcher\n\n\ndef match_always(func=None):\n \"\"\"Return always match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for always matching.\"\"\"\n opsdroid = get_opsdroid()\n if opsdroid:\n config = opsdroid.loader.current_import_config\n opsdroid.skills.append({\"always\": True,\n \"skill\": func,\n \"config\": config})\n return func\n\n # Allow for decorator with or without parenthesis as there are no args.\n if callable(func):\n return matcher(func)\n return matcher\n", "path": "opsdroid/matchers.py"}]} | 3,762 | 774 |
gh_patches_debug_2560 | rasdani/github-patches | git_diff | python-poetry__poetry-1673 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`poetry shell` with fish does not echo in python REPL
- [x] I am on the [latest](https://github.com/sdispater/poetry/releases/latest) Poetry version.
- [x] I have searched the [issues](https://github.com/sdispater/poetry/issues) of this repo and believe that this is not a duplicate.
- [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).
- **OS version and name**: MacOS Catalina 10.15.1
- **Poetry version**: 1.0.0b8
- **Link of a [Gist](https://gist.github.com/) with the contents of your pyproject.toml file**: N/A
## Issue
As described by [L0stLink](https://github.com/sdispater/poetry/issues/1593#issuecomment-555132468) in a now-closed issue, `fish` still appears to have some issues when using `poetry shell`. Specifically, in the Python REPL "typed text not visible, but pressing enter shows that the input was registered and whatever was typed, executes".
It appears that the fix in #1621 only addressed the `bash` case.
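
A tiny self-contained sketch of the current condition (function name is made up, purely to illustrate why fish is affected):

```python
def echo_disabled(shell_name):
    # Mirrors the current check in Shell.activate(): every shell
    # except bash gets setecho(False), so fish input becomes invisible.
    return not shell_name == "bash"


for name in ("bash", "zsh", "fish"):
    print(name, "->", "setecho(False)" if echo_disabled(name) else "echo left on")
```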
</issue>
<code>
[start of poetry/utils/shell.py]
1 import os
2 import signal
3 import sys
4
5 import pexpect
6
7 from clikit.utils.terminal import Terminal
8 from shellingham import ShellDetectionFailure
9 from shellingham import detect_shell
10
11 from ._compat import WINDOWS
12 from .env import VirtualEnv
13
14
15 class Shell:
16 """
17 Represents the current shell.
18 """
19
20 _shell = None
21
22 def __init__(self, name, path): # type: (str, str) -> None
23 self._name = name
24 self._path = path
25
26 @property
27 def name(self): # type: () -> str
28 return self._name
29
30 @property
31 def path(self): # type: () -> str
32 return self._path
33
34 @classmethod
35 def get(cls): # type: () -> Shell
36 """
37 Retrieve the current shell.
38 """
39 if cls._shell is not None:
40 return cls._shell
41
42 try:
43 name, path = detect_shell(os.getpid())
44 except (RuntimeError, ShellDetectionFailure):
45 raise RuntimeError("Unable to detect the current shell.")
46
47 cls._shell = cls(name, path)
48
49 return cls._shell
50
51 def activate(self, env): # type: (VirtualEnv) -> None
52 if WINDOWS:
53 return env.execute(self.path)
54
55 terminal = Terminal()
56 with env.temp_environ():
57 c = pexpect.spawn(
58 self._path, ["-i"], dimensions=(terminal.height, terminal.width)
59 )
60
61 if not self._name == "bash":
62 c.setecho(False)
63
64 activate_script = self._get_activate_script()
65 bin_dir = "Scripts" if WINDOWS else "bin"
66 activate_path = env.path / bin_dir / activate_script
67 c.sendline("{} {}".format(self._get_source_command(), activate_path))
68
69 def resize(sig, data):
70 terminal = Terminal()
71 c.setwinsize(terminal.height, terminal.width)
72
73 signal.signal(signal.SIGWINCH, resize)
74
75 # Interact with the new shell.
76 c.interact(escape_character=None)
77 c.close()
78
79 sys.exit(c.exitstatus)
80
81 def _get_activate_script(self):
82 if "fish" == self._name:
83 suffix = ".fish"
84 elif "csh" == self._name:
85 suffix = ".csh"
86 else:
87 suffix = ""
88
89 return "activate" + suffix
90
91 def _get_source_command(self):
92 if "fish" == self._name:
93 return "source"
94 elif "csh" == self._name:
95 return "source"
96
97 return "."
98
99 def __repr__(self): # type: () -> str
100 return '{}("{}", "{}")'.format(self.__class__.__name__, self._name, self._path)
101
[end of poetry/utils/shell.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/poetry/utils/shell.py b/poetry/utils/shell.py
--- a/poetry/utils/shell.py
+++ b/poetry/utils/shell.py
@@ -58,7 +58,7 @@
self._path, ["-i"], dimensions=(terminal.height, terminal.width)
)
- if not self._name == "bash":
+ if self._name == "zsh":
c.setecho(False)
activate_script = self._get_activate_script()
| {"golden_diff": "diff --git a/poetry/utils/shell.py b/poetry/utils/shell.py\n--- a/poetry/utils/shell.py\n+++ b/poetry/utils/shell.py\n@@ -58,7 +58,7 @@\n self._path, [\"-i\"], dimensions=(terminal.height, terminal.width)\n )\n \n- if not self._name == \"bash\":\n+ if self._name == \"zsh\":\n c.setecho(False)\n \n activate_script = self._get_activate_script()\n", "issue": "`poetry shell` with fish does not echo in python REPL\n- [x] I am on the [latest](https://github.com/sdispater/poetry/releases/latest) Poetry version.\r\n- [x] I have searched the [issues](https://github.com/sdispater/poetry/issues) of this repo and believe that this is not a duplicate.\r\n- [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).\r\n\r\n- **OS version and name**: MacOS Catalina 10.15.1\r\n- **Poetry version**: 1.0.0b8\r\n- **Link of a [Gist](https://gist.github.com/) with the contents of your pyproject.toml file**: N/A\r\n\r\n## Issue\r\nAs described by [L0stLink](https://github.com/sdispater/poetry/issues/1593#issuecomment-555132468) in a now-closed issue, `fish` still appears to have some issues when using `poetry shell`. Specifically, in the Python REPL \"typed text not visible, but pressing enter shows that the input was registered and whatever was typed, executes\".\r\n\r\nIt appears that the fix in #1621 only addressed the `bash` case.\n", "before_files": [{"content": "import os\nimport signal\nimport sys\n\nimport pexpect\n\nfrom clikit.utils.terminal import Terminal\nfrom shellingham import ShellDetectionFailure\nfrom shellingham import detect_shell\n\nfrom ._compat import WINDOWS\nfrom .env import VirtualEnv\n\n\nclass Shell:\n \"\"\"\n Represents the current shell.\n \"\"\"\n\n _shell = None\n\n def __init__(self, name, path): # type: (str, str) -> None\n self._name = name\n self._path = path\n\n @property\n def name(self): # type: () -> str\n return self._name\n\n @property\n def path(self): # type: () -> str\n return self._path\n\n @classmethod\n def get(cls): # type: () -> Shell\n \"\"\"\n Retrieve the current shell.\n \"\"\"\n if cls._shell is not None:\n return cls._shell\n\n try:\n name, path = detect_shell(os.getpid())\n except (RuntimeError, ShellDetectionFailure):\n raise RuntimeError(\"Unable to detect the current shell.\")\n\n cls._shell = cls(name, path)\n\n return cls._shell\n\n def activate(self, env): # type: (VirtualEnv) -> None\n if WINDOWS:\n return env.execute(self.path)\n\n terminal = Terminal()\n with env.temp_environ():\n c = pexpect.spawn(\n self._path, [\"-i\"], dimensions=(terminal.height, terminal.width)\n )\n\n if not self._name == \"bash\":\n c.setecho(False)\n\n activate_script = self._get_activate_script()\n bin_dir = \"Scripts\" if WINDOWS else \"bin\"\n activate_path = env.path / bin_dir / activate_script\n c.sendline(\"{} {}\".format(self._get_source_command(), activate_path))\n\n def resize(sig, data):\n terminal = Terminal()\n c.setwinsize(terminal.height, terminal.width)\n\n signal.signal(signal.SIGWINCH, resize)\n\n # Interact with the new shell.\n c.interact(escape_character=None)\n c.close()\n\n sys.exit(c.exitstatus)\n\n def _get_activate_script(self):\n if \"fish\" == self._name:\n suffix = \".fish\"\n elif \"csh\" == self._name:\n suffix = \".csh\"\n else:\n suffix = \"\"\n\n return \"activate\" + suffix\n\n def _get_source_command(self):\n if \"fish\" == self._name:\n return \"source\"\n elif \"csh\" == self._name:\n return \"source\"\n\n return \".\"\n\n def __repr__(self): # type: () -> str\n return 
'{}(\"{}\", \"{}\")'.format(self.__class__.__name__, self._name, self._path)\n", "path": "poetry/utils/shell.py"}]} | 1,620 | 111 |
gh_patches_debug_24650 | rasdani/github-patches | git_diff | PlasmaPy__PlasmaPy-1690 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Switch to using `pathlib` instead of `os.path`
In newer Python code, it's generally recommended to use [`pathlib`](https://docs.python.org/3/library/pathlib.html) instead of `os.path`. There are [a few places where we use `os.path`](https://github.com/PlasmaPy/PlasmaPy/search?q=os.path+-plasmapy_sphinx) that should be switched over to `pathlib`.
There are also [places where we are using `pathlib`](https://github.com/PlasmaPy/PlasmaPy/search?q=pathlib&type=code) which could be used as examples.
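
As a small illustration of the kind of change involved (the path below is made up for the example):

```python
import os.path
from pathlib import Path

hdf5 = "data/run_001.h5"  # made-up path, purely for illustration

# current os.path style
exists_old = os.path.isfile(hdf5)

# equivalent pathlib style
exists_new = Path(hdf5).is_file()

assert exists_old == exists_new
```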
We shouldn't make this change in `docs/plasmapy_sphinx` since that's being moved to another package (#1643).
Thanks!
</issue>
<code>
[start of plasmapy/plasma/sources/openpmd_hdf5.py]
1 __all__ = ["HDF5Reader"]
2
3 import astropy.units as u
4 import h5py
5 import numpy as np
6 import os
7
8 from packaging.version import Version
9
10 from plasmapy.plasma.exceptions import DataStandardError
11 from plasmapy.plasma.plasma_base import GenericPlasma
12
13 _OUTDATED_VERSION = "1.1.0"
14 _NEWER_VERSION = "2.0.0"
15
16 # This is the order what OpenPMD uses to store unit
17 # dimensions for a record.
18 _UNITS = (u.meter, u.kilogram, u.second, u.ampere, u.Kelvin, u.mol, u.candela)
19
20
21 def _fetch_units(openPMD_dims):
22 """Converts a collection of OpenPMD dimensions to astropy.units."""
23
24 units = u.dimensionless_unscaled
25 for factor, unit in zip(openPMD_dims, _UNITS):
26 units *= unit**factor
27 units, *_ = units.compose()
28 return units
29
30
31 def _valid_version(openPMD_version, outdated=_OUTDATED_VERSION, newer=_NEWER_VERSION):
32 """Checks if the passed version is supported or not."""
33
34 parsed_version = Version(openPMD_version)
35 outdated_version = Version(outdated)
36 newer_version = Version(newer)
37 return outdated_version <= parsed_version < newer_version
38
39
40 class HDF5Reader(GenericPlasma):
41 """
42 Core class for accessing various attributes on HDF5 files that
43 are based on OpenPMD_ standards.
44
45 Parameters
46 ----------
47 hdf5 : `str`
48 Path to HDF5 file.
49
50 **kwargs
51 Any keyword accepted by `~plasmapy.plasma.plasma_base.GenericPlasma`.
52
53 """
54
55 def __init__(self, hdf5, **kwargs):
56 super().__init__(**kwargs)
57
58 if not os.path.isfile(hdf5):
59 raise FileNotFoundError(f"Could not find file: '{hdf5}'")
60
61 h5 = h5py.File(hdf5, "r")
62 self.h5 = h5
63
64 self._check_valid_openpmd_version()
65
66 self.subname = tuple(self.h5["data"])[0]
67
68 def __enter__(self):
69 return self.h5
70
71 def close(self):
72 self.h5.close()
73
74 def __exit__(self):
75 self.h5.close()
76
77 def _check_valid_openpmd_version(self):
78 try:
79 openPMD_version = self.h5.attrs["openPMD"].decode("utf-8")
80 if _valid_version(openPMD_version):
81 return True
82 else:
83 raise DataStandardError(
84 f"We currently only support HDF5 versions"
85 f"starting from v{_OUTDATED_VERSION} and "
86 f"lower than v{_NEWER_VERSION}. You can "
87 f"however convert your HDF5 to a supported "
88 f"version. For more information; see "
89 f"https://github.com/openPMD/openPMD-updater"
90 )
91 except KeyError:
92 raise DataStandardError(
93 "Input HDF5 file does not go on with standards defined by OpenPMD"
94 )
95
96 @property
97 def electric_field(self):
98 """
99 An (x, y, z) array containing electric field data. (Returned as an astropy
100 `~astropy.units.Quantity`.)
101 """
102 path = f"data/{self.subname}/fields/E"
103 if path in self.h5:
104 units = _fetch_units(self.h5[path].attrs["unitDimension"])
105 axes = [self.h5[path][axis] for axis in self.h5[path]]
106 return np.array(axes) * units
107 else:
108 raise AttributeError("No electric field data available in HDF5 file")
109
110 @property
111 def charge_density(self):
112 """
113 An array containing charge density data. (Returned as an astropy
114 `~astropy.units.Quantity`.)
115 """
116 path = f"data/{self.subname}/fields/rho"
117 if path in self.h5:
118 units = _fetch_units(self.h5[path].attrs["unitDimension"])
119 return np.array(self.h5[path]) * units
120 else:
121 raise AttributeError("No charge density data available in HDF5 file")
122
123 @property
124 def magnetic_field(self):
125 path = f"data/{self.subname}/fields/B"
126 if path in self.h5:
127 units = _fetch_units(self.h5[path].attrs["unitDimension"])
128 axes = [self.h5[path][axis] for axis in self.h5[path]]
129 return np.array(axes) * units
130 else:
131 raise AttributeError("No magnetic field data available in HDF5 file")
132
133 @property
134 def electric_current(self):
135 path = f"data/{self.subname}/fields/J"
136 if path in self.h5:
137 units = _fetch_units(self.h5[path].attrs["unitDimension"])
138 axes = [self.h5[path][axis] for axis in self.h5[path]]
139 return np.array(axes) * units
140 else:
141 raise AttributeError("No electric current data available in HDF5 file")
142
143 @classmethod
144 def is_datasource_for(cls, **kwargs):
145 if "hdf5" not in kwargs:
146 return False
147
148 hdf5 = kwargs.get("hdf5")
149 openPMD = kwargs.get("openPMD")
150
151 isfile = os.path.isfile(hdf5)
152 if not isfile:
153 raise FileNotFoundError(f"Could not find file: '{hdf5}'")
154
155 if "openPMD" not in kwargs:
156
157 h5 = h5py.File(hdf5, "r")
158 try:
159 openPMD = h5.attrs["openPMD"]
160 except KeyError:
161 openPMD = False
162
163 return openPMD
164
[end of plasmapy/plasma/sources/openpmd_hdf5.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/plasmapy/plasma/sources/openpmd_hdf5.py b/plasmapy/plasma/sources/openpmd_hdf5.py
--- a/plasmapy/plasma/sources/openpmd_hdf5.py
+++ b/plasmapy/plasma/sources/openpmd_hdf5.py
@@ -3,9 +3,9 @@
import astropy.units as u
import h5py
import numpy as np
-import os
from packaging.version import Version
+from pathlib import Path
from plasmapy.plasma.exceptions import DataStandardError
from plasmapy.plasma.plasma_base import GenericPlasma
@@ -55,7 +55,7 @@
def __init__(self, hdf5, **kwargs):
super().__init__(**kwargs)
- if not os.path.isfile(hdf5):
+ if not Path(hdf5).is_file():
raise FileNotFoundError(f"Could not find file: '{hdf5}'")
h5 = h5py.File(hdf5, "r")
@@ -148,8 +148,7 @@
hdf5 = kwargs.get("hdf5")
openPMD = kwargs.get("openPMD")
- isfile = os.path.isfile(hdf5)
- if not isfile:
+ if not Path(hdf5).is_file():
raise FileNotFoundError(f"Could not find file: '{hdf5}'")
if "openPMD" not in kwargs:
| {"golden_diff": "diff --git a/plasmapy/plasma/sources/openpmd_hdf5.py b/plasmapy/plasma/sources/openpmd_hdf5.py\n--- a/plasmapy/plasma/sources/openpmd_hdf5.py\n+++ b/plasmapy/plasma/sources/openpmd_hdf5.py\n@@ -3,9 +3,9 @@\n import astropy.units as u\n import h5py\n import numpy as np\n-import os\n \n from packaging.version import Version\n+from pathlib import Path\n \n from plasmapy.plasma.exceptions import DataStandardError\n from plasmapy.plasma.plasma_base import GenericPlasma\n@@ -55,7 +55,7 @@\n def __init__(self, hdf5, **kwargs):\n super().__init__(**kwargs)\n \n- if not os.path.isfile(hdf5):\n+ if not Path(hdf5).is_file():\n raise FileNotFoundError(f\"Could not find file: '{hdf5}'\")\n \n h5 = h5py.File(hdf5, \"r\")\n@@ -148,8 +148,7 @@\n hdf5 = kwargs.get(\"hdf5\")\n openPMD = kwargs.get(\"openPMD\")\n \n- isfile = os.path.isfile(hdf5)\n- if not isfile:\n+ if not Path(hdf5).is_file():\n raise FileNotFoundError(f\"Could not find file: '{hdf5}'\")\n \n if \"openPMD\" not in kwargs:\n", "issue": "Switch to using `pathlib` instead of `os.path`\nIn newer Python code, it's generally recommended to use [`pathlib`](https://docs.python.org/3/library/pathlib.html) instead of `os.path`. There are [a few places where we use `os.path`](https://github.com/PlasmaPy/PlasmaPy/search?q=os.path+-plasmapy_sphinx) where we should switch to `pathlib`. \r\n\r\nThere are also [places where we are using `pathlib`](https://github.com/PlasmaPy/PlasmaPy/search?q=pathlib&type=code) which could be used as examples. \r\n\r\nWe shouldn't make this change in `docs/plasmapy_sphinx` since that's being moved to another package (#1643).\r\n\r\nThanks!\n", "before_files": [{"content": "__all__ = [\"HDF5Reader\"]\n\nimport astropy.units as u\nimport h5py\nimport numpy as np\nimport os\n\nfrom packaging.version import Version\n\nfrom plasmapy.plasma.exceptions import DataStandardError\nfrom plasmapy.plasma.plasma_base import GenericPlasma\n\n_OUTDATED_VERSION = \"1.1.0\"\n_NEWER_VERSION = \"2.0.0\"\n\n# This is the order what OpenPMD uses to store unit\n# dimensions for a record.\n_UNITS = (u.meter, u.kilogram, u.second, u.ampere, u.Kelvin, u.mol, u.candela)\n\n\ndef _fetch_units(openPMD_dims):\n \"\"\"Converts a collection of OpenPMD dimensions to astropy.units.\"\"\"\n\n units = u.dimensionless_unscaled\n for factor, unit in zip(openPMD_dims, _UNITS):\n units *= unit**factor\n units, *_ = units.compose()\n return units\n\n\ndef _valid_version(openPMD_version, outdated=_OUTDATED_VERSION, newer=_NEWER_VERSION):\n \"\"\"Checks if the passed version is supported or not.\"\"\"\n\n parsed_version = Version(openPMD_version)\n outdated_version = Version(outdated)\n newer_version = Version(newer)\n return outdated_version <= parsed_version < newer_version\n\n\nclass HDF5Reader(GenericPlasma):\n \"\"\"\n Core class for accessing various attributes on HDF5 files that\n are based on OpenPMD_ standards.\n\n Parameters\n ----------\n hdf5 : `str`\n Path to HDF5 file.\n\n **kwargs\n Any keyword accepted by `~plasmapy.plasma.plasma_base.GenericPlasma`.\n\n \"\"\"\n\n def __init__(self, hdf5, **kwargs):\n super().__init__(**kwargs)\n\n if not os.path.isfile(hdf5):\n raise FileNotFoundError(f\"Could not find file: '{hdf5}'\")\n\n h5 = h5py.File(hdf5, \"r\")\n self.h5 = h5\n\n self._check_valid_openpmd_version()\n\n self.subname = tuple(self.h5[\"data\"])[0]\n\n def __enter__(self):\n return self.h5\n\n def close(self):\n self.h5.close()\n\n def __exit__(self):\n self.h5.close()\n\n def _check_valid_openpmd_version(self):\n try:\n 
openPMD_version = self.h5.attrs[\"openPMD\"].decode(\"utf-8\")\n if _valid_version(openPMD_version):\n return True\n else:\n raise DataStandardError(\n f\"We currently only support HDF5 versions\"\n f\"starting from v{_OUTDATED_VERSION} and \"\n f\"lower than v{_NEWER_VERSION}. You can \"\n f\"however convert your HDF5 to a supported \"\n f\"version. For more information; see \"\n f\"https://github.com/openPMD/openPMD-updater\"\n )\n except KeyError:\n raise DataStandardError(\n \"Input HDF5 file does not go on with standards defined by OpenPMD\"\n )\n\n @property\n def electric_field(self):\n \"\"\"\n An (x, y, z) array containing electric field data. (Returned as an astropy\n `~astropy.units.Quantity`.)\n \"\"\"\n path = f\"data/{self.subname}/fields/E\"\n if path in self.h5:\n units = _fetch_units(self.h5[path].attrs[\"unitDimension\"])\n axes = [self.h5[path][axis] for axis in self.h5[path]]\n return np.array(axes) * units\n else:\n raise AttributeError(\"No electric field data available in HDF5 file\")\n\n @property\n def charge_density(self):\n \"\"\"\n An array containing charge density data. (Returned as an astropy\n `~astropy.units.Quantity`.)\n \"\"\"\n path = f\"data/{self.subname}/fields/rho\"\n if path in self.h5:\n units = _fetch_units(self.h5[path].attrs[\"unitDimension\"])\n return np.array(self.h5[path]) * units\n else:\n raise AttributeError(\"No charge density data available in HDF5 file\")\n\n @property\n def magnetic_field(self):\n path = f\"data/{self.subname}/fields/B\"\n if path in self.h5:\n units = _fetch_units(self.h5[path].attrs[\"unitDimension\"])\n axes = [self.h5[path][axis] for axis in self.h5[path]]\n return np.array(axes) * units\n else:\n raise AttributeError(\"No magnetic field data available in HDF5 file\")\n\n @property\n def electric_current(self):\n path = f\"data/{self.subname}/fields/J\"\n if path in self.h5:\n units = _fetch_units(self.h5[path].attrs[\"unitDimension\"])\n axes = [self.h5[path][axis] for axis in self.h5[path]]\n return np.array(axes) * units\n else:\n raise AttributeError(\"No electric current data available in HDF5 file\")\n\n @classmethod\n def is_datasource_for(cls, **kwargs):\n if \"hdf5\" not in kwargs:\n return False\n\n hdf5 = kwargs.get(\"hdf5\")\n openPMD = kwargs.get(\"openPMD\")\n\n isfile = os.path.isfile(hdf5)\n if not isfile:\n raise FileNotFoundError(f\"Could not find file: '{hdf5}'\")\n\n if \"openPMD\" not in kwargs:\n\n h5 = h5py.File(hdf5, \"r\")\n try:\n openPMD = h5.attrs[\"openPMD\"]\n except KeyError:\n openPMD = False\n\n return openPMD\n", "path": "plasmapy/plasma/sources/openpmd_hdf5.py"}]} | 2,365 | 324 |
gh_patches_debug_34178 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-1127 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add check for mandatory functions in model files
Users are required to implement some functions (e.g., `loss()`) in the model file. We can add some checks for the model file to see whether these required functions are implemented correctly. Otherwise, if the functions are not implemented (correctly), this won't be found until the job starts to run on the k8s cluster.
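
A rough sketch of what such a check could look like (names and message wording are illustrative; the real check would hook into the existing model-spec loading):

```python
REQUIRED_SPECS = ("dataset_fn", "loss", "optimizer", "eval_metrics_fn")


def check_required_specs(module_dict):
    # Fail fast when the job is submitted instead of failing later
    # on the Kubernetes cluster.
    missing = [name for name in REQUIRED_SPECS if name not in module_dict]
    if missing:
        raise ValueError(
            "Model file is missing required function(s): " + ", ".join(missing)
        )


try:
    # Example: a model module that forgot to define loss() and eval_metrics_fn()
    check_required_specs({"dataset_fn": lambda: None, "optimizer": lambda: None})
except ValueError as error:
    print(error)
```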
</issue>
<code>
[start of elasticdl/python/common/model_helper.py]
1 import importlib.util
2 import os
3
4 from elasticdl.python.common.log_util import default_logger as logger
5 from elasticdl.python.worker.prediction_outputs_processor import (
6 BasePredictionOutputsProcessor,
7 )
8
9
10 def load_module(module_file):
11 spec = importlib.util.spec_from_file_location(module_file, module_file)
12 module = importlib.util.module_from_spec(spec)
13 spec.loader.exec_module(module)
14 return module
15
16
17 # TODO: Discuss whether we need to support default model
18 # function/class names such as `custom_model()`
19 # or `CustomModel()`
20 def load_model_from_module(model_def, model_module, model_params):
21 model_def_name = model_def.split(".")[-1]
22 if model_def_name in model_module:
23 custom_model_name = model_def_name
24 else:
25 raise ValueError(
26 "Cannot find the custom model function/class "
27 "in model definition files"
28 )
29 if model_params:
30 kvs = model_params.split(",")
31 model_params_dict = {}
32 for kv in kvs:
33 k, v = kv.split("=")
34 model_params_dict[k] = eval(v)
35 return model_module[custom_model_name](**model_params_dict)
36 else:
37 return model_module[custom_model_name]()
38
39
40 def get_module_file_path(model_zoo, spec_key):
41 """Get the path to module file from model zoo and the spec string.
42
43 For example, if `model_zoo = "model_zoo"` and
44 `spec_key = "test_module.custom_model"`, the function returns
45 "model_zoo/test_module.py".
46 """
47 return os.path.join(model_zoo, "/".join(spec_key.split(".")[:-1]) + ".py")
48
49
50 def _get_spec_value(spec_key, model_zoo, default_module):
51 """Get the value to the given spec key.
52
53 Notes:
54
55 * If the dot-splitted spec key (e.g. "test_module.custom_model"
56 is splitted into "test_module" and "custom_model") is of length 1
57 (e.g. `spec_key` is "custom_model"), return the value in the
58 specified `default_module`.
59 * If the spec key does not exist in the module, return `None`.
60 """
61 spec_key_items = spec_key.split(".")
62 spec_key_base = spec_key_items[-1]
63 if len(spec_key_items) == 1:
64 spec_key_module = default_module
65 else:
66 spec_key_module = load_module(
67 get_module_file_path(model_zoo, spec_key)
68 ).__dict__
69 return (
70 spec_key_module[spec_key_base]
71 if spec_key_base in spec_key_module
72 else None
73 )
74
75
76 def get_model_spec(
77 model_zoo,
78 model_def,
79 model_params,
80 dataset_fn,
81 loss,
82 optimizer,
83 eval_metrics_fn,
84 prediction_outputs_processor,
85 ):
86 """Get the model spec items in a tuple.
87
88 The model spec tuple contains the following items in order:
89
90 * The model object instantiated with parameters specified
91 in `model_params`,
92 * The `dataset_fn`,
93 * The `loss`,
94 * The `optimizer`,
95 * The `eval_metrics_fn`,
96 * The `prediction_outputs_processor`. Note that it will print
97 warning if it's not inherited from `BasePredictionOutputsProcessor`.
98 """
99 model_def_module_file = get_module_file_path(model_zoo, model_def)
100 default_module = load_module(model_def_module_file).__dict__
101 model = load_model_from_module(model_def, default_module, model_params)
102 prediction_outputs_processor = _get_spec_value(
103 prediction_outputs_processor, model_zoo, default_module
104 )
105 if prediction_outputs_processor and not isinstance(
106 prediction_outputs_processor, BasePredictionOutputsProcessor
107 ):
108 logger.warning(
109 "prediction_outputs_processor is not "
110 "inherited from BasePredictionOutputsProcessor. "
111 "Prediction outputs may not be processed correctly."
112 )
113 return (
114 model,
115 _get_spec_value(dataset_fn, model_zoo, default_module),
116 _get_spec_value(loss, model_zoo, default_module),
117 _get_spec_value(optimizer, model_zoo, default_module),
118 _get_spec_value(eval_metrics_fn, model_zoo, default_module),
119 prediction_outputs_processor,
120 )
121
122
123 def save_checkpoint_to_file(pb_model, file_name):
124 encoded_model = pb_model.SerializeToString()
125 with open(file_name, "wb") as f:
126 f.write(encoded_model)
127
128
129 def load_from_checkpoint_file(file_name):
130 from elasticdl.proto import elasticdl_pb2
131
132 pb_model = elasticdl_pb2.Model()
133 with open(file_name, "rb") as f:
134 pb_model.ParseFromString(f.read())
135 return pb_model
136
137
138 def find_layer(model, layer_class):
139 """
140 Find all layers in model that are instances of layer_class
141 """
142 layers = []
143 for layer in model.layers:
144 if isinstance(layer, layer_class):
145 layers.append(layer)
146 elif hasattr(layer, "layers"):
147 # search in nested layers
148 layers += find_layer(layer, layer_class)
149 return layers
150
[end of elasticdl/python/common/model_helper.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/elasticdl/python/common/model_helper.py b/elasticdl/python/common/model_helper.py
--- a/elasticdl/python/common/model_helper.py
+++ b/elasticdl/python/common/model_helper.py
@@ -14,9 +14,6 @@
return module
-# TODO: Discuss whether we need to support default model
-# function/class names such as `custom_model()`
-# or `CustomModel()`
def load_model_from_module(model_def, model_module, model_params):
model_def_name = model_def.split(".")[-1]
if model_def_name in model_module:
@@ -47,7 +44,7 @@
return os.path.join(model_zoo, "/".join(spec_key.split(".")[:-1]) + ".py")
-def _get_spec_value(spec_key, model_zoo, default_module):
+def _get_spec_value(spec_key, model_zoo, default_module, required=False):
"""Get the value to the given spec key.
Notes:
@@ -66,11 +63,17 @@
spec_key_module = load_module(
get_module_file_path(model_zoo, spec_key)
).__dict__
- return (
+ spec_value = (
spec_key_module[spec_key_base]
if spec_key_base in spec_key_module
else None
)
+ if required and spec_value is None:
+ raise Exception(
+ "Missing required spec key %s in the module: %s"
+ % (spec_key_base, spec_key)
+ )
+ return spec_value
def get_model_spec(
@@ -112,10 +115,12 @@
)
return (
model,
- _get_spec_value(dataset_fn, model_zoo, default_module),
- _get_spec_value(loss, model_zoo, default_module),
- _get_spec_value(optimizer, model_zoo, default_module),
- _get_spec_value(eval_metrics_fn, model_zoo, default_module),
+ _get_spec_value(dataset_fn, model_zoo, default_module, required=True),
+ _get_spec_value(loss, model_zoo, default_module, required=True),
+ _get_spec_value(optimizer, model_zoo, default_module, required=True),
+ _get_spec_value(
+ eval_metrics_fn, model_zoo, default_module, required=True
+ ),
prediction_outputs_processor,
)
| {"golden_diff": "diff --git a/elasticdl/python/common/model_helper.py b/elasticdl/python/common/model_helper.py\n--- a/elasticdl/python/common/model_helper.py\n+++ b/elasticdl/python/common/model_helper.py\n@@ -14,9 +14,6 @@\n return module\n \n \n-# TODO: Discuss whether we need to support default model\n-# function/class names such as `custom_model()`\n-# or `CustomModel()`\n def load_model_from_module(model_def, model_module, model_params):\n model_def_name = model_def.split(\".\")[-1]\n if model_def_name in model_module:\n@@ -47,7 +44,7 @@\n return os.path.join(model_zoo, \"/\".join(spec_key.split(\".\")[:-1]) + \".py\")\n \n \n-def _get_spec_value(spec_key, model_zoo, default_module):\n+def _get_spec_value(spec_key, model_zoo, default_module, required=False):\n \"\"\"Get the value to the given spec key.\n \n Notes:\n@@ -66,11 +63,17 @@\n spec_key_module = load_module(\n get_module_file_path(model_zoo, spec_key)\n ).__dict__\n- return (\n+ spec_value = (\n spec_key_module[spec_key_base]\n if spec_key_base in spec_key_module\n else None\n )\n+ if required and spec_value is None:\n+ raise Exception(\n+ \"Missing required spec key %s in the module: %s\"\n+ % (spec_key_base, spec_key)\n+ )\n+ return spec_value\n \n \n def get_model_spec(\n@@ -112,10 +115,12 @@\n )\n return (\n model,\n- _get_spec_value(dataset_fn, model_zoo, default_module),\n- _get_spec_value(loss, model_zoo, default_module),\n- _get_spec_value(optimizer, model_zoo, default_module),\n- _get_spec_value(eval_metrics_fn, model_zoo, default_module),\n+ _get_spec_value(dataset_fn, model_zoo, default_module, required=True),\n+ _get_spec_value(loss, model_zoo, default_module, required=True),\n+ _get_spec_value(optimizer, model_zoo, default_module, required=True),\n+ _get_spec_value(\n+ eval_metrics_fn, model_zoo, default_module, required=True\n+ ),\n prediction_outputs_processor,\n )\n", "issue": "Add check for mandatory functions in model files\nUsers are required to implement some functions(e.g, `loss()`) in the model file. We can add some checks for the model file to see if these required functions are implemented correctly. 
Otherwise, if the functions are not implemented (correctly), it won't be found until the job starts to run on k8s cluster.\n", "before_files": [{"content": "import importlib.util\nimport os\n\nfrom elasticdl.python.common.log_util import default_logger as logger\nfrom elasticdl.python.worker.prediction_outputs_processor import (\n BasePredictionOutputsProcessor,\n)\n\n\ndef load_module(module_file):\n spec = importlib.util.spec_from_file_location(module_file, module_file)\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n return module\n\n\n# TODO: Discuss whether we need to support default model\n# function/class names such as `custom_model()`\n# or `CustomModel()`\ndef load_model_from_module(model_def, model_module, model_params):\n model_def_name = model_def.split(\".\")[-1]\n if model_def_name in model_module:\n custom_model_name = model_def_name\n else:\n raise ValueError(\n \"Cannot find the custom model function/class \"\n \"in model definition files\"\n )\n if model_params:\n kvs = model_params.split(\",\")\n model_params_dict = {}\n for kv in kvs:\n k, v = kv.split(\"=\")\n model_params_dict[k] = eval(v)\n return model_module[custom_model_name](**model_params_dict)\n else:\n return model_module[custom_model_name]()\n\n\ndef get_module_file_path(model_zoo, spec_key):\n \"\"\"Get the path to module file from model zoo and the spec string.\n\n For example, if `model_zoo = \"model_zoo\"` and\n `spec_key = \"test_module.custom_model\"`, the function returns\n \"model_zoo/test_module.py\".\n \"\"\"\n return os.path.join(model_zoo, \"/\".join(spec_key.split(\".\")[:-1]) + \".py\")\n\n\ndef _get_spec_value(spec_key, model_zoo, default_module):\n \"\"\"Get the value to the given spec key.\n\n Notes:\n\n * If the dot-splitted spec key (e.g. \"test_module.custom_model\"\n is splitted into \"test_module\" and \"custom_model\") is of length 1\n (e.g. `spec_key` is \"custom_model\"), return the value in the\n specified `default_module`.\n * If the spec key does not exist in the module, return `None`.\n \"\"\"\n spec_key_items = spec_key.split(\".\")\n spec_key_base = spec_key_items[-1]\n if len(spec_key_items) == 1:\n spec_key_module = default_module\n else:\n spec_key_module = load_module(\n get_module_file_path(model_zoo, spec_key)\n ).__dict__\n return (\n spec_key_module[spec_key_base]\n if spec_key_base in spec_key_module\n else None\n )\n\n\ndef get_model_spec(\n model_zoo,\n model_def,\n model_params,\n dataset_fn,\n loss,\n optimizer,\n eval_metrics_fn,\n prediction_outputs_processor,\n):\n \"\"\"Get the model spec items in a tuple.\n\n The model spec tuple contains the following items in order:\n\n * The model object instantiated with parameters specified\n in `model_params`,\n * The `dataset_fn`,\n * The `loss`,\n * The `optimizer`,\n * The `eval_metrics_fn`,\n * The `prediction_outputs_processor`. Note that it will print\n warning if it's not inherited from `BasePredictionOutputsProcessor`.\n \"\"\"\n model_def_module_file = get_module_file_path(model_zoo, model_def)\n default_module = load_module(model_def_module_file).__dict__\n model = load_model_from_module(model_def, default_module, model_params)\n prediction_outputs_processor = _get_spec_value(\n prediction_outputs_processor, model_zoo, default_module\n )\n if prediction_outputs_processor and not isinstance(\n prediction_outputs_processor, BasePredictionOutputsProcessor\n ):\n logger.warning(\n \"prediction_outputs_processor is not \"\n \"inherited from BasePredictionOutputsProcessor. 
\"\n \"Prediction outputs may not be processed correctly.\"\n )\n return (\n model,\n _get_spec_value(dataset_fn, model_zoo, default_module),\n _get_spec_value(loss, model_zoo, default_module),\n _get_spec_value(optimizer, model_zoo, default_module),\n _get_spec_value(eval_metrics_fn, model_zoo, default_module),\n prediction_outputs_processor,\n )\n\n\ndef save_checkpoint_to_file(pb_model, file_name):\n encoded_model = pb_model.SerializeToString()\n with open(file_name, \"wb\") as f:\n f.write(encoded_model)\n\n\ndef load_from_checkpoint_file(file_name):\n from elasticdl.proto import elasticdl_pb2\n\n pb_model = elasticdl_pb2.Model()\n with open(file_name, \"rb\") as f:\n pb_model.ParseFromString(f.read())\n return pb_model\n\n\ndef find_layer(model, layer_class):\n \"\"\"\n Find all layers in model that are instances of layer_class\n \"\"\"\n layers = []\n for layer in model.layers:\n if isinstance(layer, layer_class):\n layers.append(layer)\n elif hasattr(layer, \"layers\"):\n # search in nested layers\n layers += find_layer(layer, layer_class)\n return layers\n", "path": "elasticdl/python/common/model_helper.py"}]} | 2,041 | 520 |
gh_patches_debug_34834 | rasdani/github-patches | git_diff | svthalia__concrexit-3407 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Canceled event registrations are marked as queued
### Describe the bug
After canceling a registration, the registration is still reported as queued (it keeps a queue position) instead of showing up as canceled.
### How to reproduce
Steps to reproduce the behaviour:
1. Go to https://staging.thalia.nu/api/v2/admin/events/183/registrations/?limit=999999999&ordering=-date_cancelled&cancelled=true&queued=true
2. Go to https://staging.thalia.nu/api/v2/admin/events/183/registrations/?limit=999999999&ordering=-date_cancelled&cancelled=true&queued=false
3. Notice that the canceled registrations are only listed under `queued=true`, and that the queue position is 3
4. Go to https://staging.thalia.nu/api/v2/admin/events/183/registrations/?limit=999999999&ordering=-date_cancelled&cancelled=false&queued=true
5. Notice that the queued position 3 already exists.
### Expected behaviour
Canceled registrations should show up as canceled and not in queue, as they are not in the queue
### Screenshots
If applicable, add screenshots to help explain your problem.
### Additional context
I think the reason they are listed as queued is because the queue position is set. Ideally this field would be nulled or set to a special value like -1.
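A minimal sketch of that idea against the `EventRegistration` model shown further down (illustrative only, not a tested patch): wrap the existing annotation in `Case`/`When` so that rows with a `date_cancelled` annotate to `NULL` and therefore never look queued.

```python
# Sketch: only registrations that are not cancelled get a queue position;
# cancelled ones annotate to NULL so they no longer count as "queued".
from django.db.models import Case, Count, F, Q, When
from django.db.models.functions import Greatest, NullIf
from queryable_properties.properties import AnnotationProperty

queue_position = AnnotationProperty(
    Case(
        When(
            date_cancelled=None,
            then=NullIf(
                Greatest(
                    Count(
                        "event__eventregistration",
                        filter=Q(event__eventregistration__date_cancelled=None)
                        & (
                            Q(event__eventregistration__date__lt=F("date"))
                            | Q(event__eventregistration__id__lte=F("id"))
                            & Q(event__eventregistration__date__exact=F("date"))
                        ),
                    )
                    - F("event__max_participants"),
                    0,
                ),
                0,
            ),
        ),
        default=None,  # cancelled registrations are not in the queue
    )
)
```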
</issue>
<code>
[start of website/events/models/event_registration.py]
1 from django.core import validators
2 from django.core.exceptions import ValidationError
3 from django.db import models
4 from django.db.models import Count, F, Q
5 from django.db.models.functions import Greatest, NullIf
6 from django.utils import timezone
7 from django.utils.translation import gettext_lazy as _
8
9 from queryable_properties.managers import QueryablePropertiesManager
10 from queryable_properties.properties import AnnotationProperty
11
12 from events import emails
13 from payments.models import PaymentAmountField
14
15 from .event import Event
16
17
18 def registration_member_choices_limit():
19 """Define queryset filters to only include current members."""
20 return Q(membership__until__isnull=True) | Q(
21 membership__until__gt=timezone.now().date()
22 )
23
24
25 class EventRegistration(models.Model):
26 """Describes a registration for an Event."""
27
28 objects = QueryablePropertiesManager()
29
30 event = models.ForeignKey(Event, models.CASCADE)
31
32 member = models.ForeignKey(
33 "members.Member",
34 models.CASCADE,
35 blank=True,
36 null=True,
37 )
38
39 name = models.CharField(
40 _("name"),
41 max_length=50,
42 help_text=_("Use this for non-members"),
43 null=True,
44 blank=True,
45 )
46
47 alt_email = models.EmailField(
48 _("email"),
49 help_text=_("Email address for non-members"),
50 max_length=254,
51 null=True,
52 blank=True,
53 )
54
55 alt_phone_number = models.CharField(
56 max_length=20,
57 verbose_name=_("Phone number"),
58 help_text=_("Phone number for non-members"),
59 validators=[
60 validators.RegexValidator(
61 regex=r"^\+?\d+$",
62 message=_("Please enter a valid phone number"),
63 )
64 ],
65 null=True,
66 blank=True,
67 )
68
69 date = models.DateTimeField(_("registration date"), default=timezone.now)
70 date_cancelled = models.DateTimeField(_("cancellation date"), null=True, blank=True)
71
72 present = models.BooleanField(
73 _("present"),
74 default=False,
75 )
76
77 special_price = PaymentAmountField(
78 verbose_name=_("special price"),
79 blank=True,
80 null=True,
81 validators=[validators.MinValueValidator(0)],
82 )
83
84 remarks = models.TextField(_("remarks"), null=True, blank=True)
85
86 payment = models.OneToOneField(
87 "payments.Payment",
88 related_name="events_registration",
89 on_delete=models.SET_NULL,
90 blank=True,
91 null=True,
92 )
93
94 @property
95 def phone_number(self):
96 if self.member:
97 return self.member.profile.phone_number
98 return self.alt_phone_number
99
100 @property
101 def email(self):
102 if self.member:
103 return self.member.email
104 return self.alt_email
105
106 @property
107 def information_fields(self):
108 fields = self.event.registrationinformationfield_set.all()
109 return [
110 {"field": field, "value": field.get_value_for(self)} for field in fields
111 ]
112
113 @property
114 def is_registered(self):
115 return self.date_cancelled is None
116
117 queue_position = AnnotationProperty(
118 # Get queue position by counting amount of registrations with lower date and in case of same date lower id
119 # Subsequently cast to None if this is 0 or lower, in which case it isn't in the queue
120 NullIf(
121 Greatest(
122 Count(
123 "event__eventregistration",
124 filter=Q(event__eventregistration__date_cancelled=None)
125 & (
126 Q(event__eventregistration__date__lt=F("date"))
127 | Q(event__eventregistration__id__lte=F("id"))
128 & Q(event__eventregistration__date__exact=F("date"))
129 ),
130 )
131 - F("event__max_participants"),
132 0,
133 ),
134 0,
135 )
136 )
137
138 @property
139 def is_invited(self):
140 return self.is_registered and not self.queue_position
141
142 def is_external(self):
143 return bool(self.name)
144
145 def is_late_cancellation(self):
146 # First check whether or not the user cancelled
147 # If the user cancelled then check if this was after the deadline
148 # And if there is a max participants number:
149 # do a complex check to calculate if this user was on
150 # the waiting list at the time of cancellation, since
151 # you shouldn't need to pay the costs of something
152 # you weren't even able to go to.
153 return (
154 self.date_cancelled
155 and self.event.cancel_deadline
156 and self.date_cancelled > self.event.cancel_deadline
157 and (
158 self.event.max_participants is None
159 or self.event.eventregistration_set.filter(
160 (
161 Q(date_cancelled__gte=self.date_cancelled)
162 | Q(date_cancelled=None)
163 )
164 & Q(date__lte=self.date)
165 ).count()
166 < self.event.max_participants
167 )
168 )
169
170 def is_paid(self):
171 return self.payment
172
173 @property
174 def payment_amount(self):
175 return self.event.price if not self.special_price else self.special_price
176
177 def would_cancel_after_deadline(self):
178 now = timezone.now()
179 if not self.event.registration_required:
180 return False
181 return not self.queue_position and now >= self.event.cancel_deadline
182
183 def clean(self):
184 errors = {}
185 if (self.member is None and not self.name) or (self.member and self.name):
186 errors.update(
187 {
188 "member": _("Either specify a member or a name"),
189 "name": _("Either specify a member or a name"),
190 }
191 )
192 if self.member and self.alt_email:
193 errors.update(
194 {"alt_email": _("Email should only be specified for non-members")}
195 )
196 if self.member and self.alt_phone_number:
197 errors.update(
198 {
199 "alt_phone_number": _(
200 "Phone number should only be specified for non-members"
201 )
202 }
203 )
204 if (
205 self.payment
206 and self.special_price
207 and self.special_price != self.payment.amount
208 ):
209 errors.update(
210 {
211 "special_price": _(
212 "Cannot change price of already paid registration"
213 ),
214 }
215 )
216
217 if errors:
218 raise ValidationError(errors)
219
220 def save(self, **kwargs):
221 self.full_clean()
222
223 created = self.pk is None
224 super().save(**kwargs)
225
226 if (
227 created
228 and self.is_registered
229 and self.email
230 and self.event.registration_required
231 ):
232 if (
233 self.member is not None
234 and not self.member.profile.receive_registration_confirmation
235 ):
236 return # Don't send email if the user doesn't want them.
237
238 emails.notify_registration(self)
239
240 def __str__(self):
241 if self.member:
242 return f"{self.member.get_full_name()}: {self.event}"
243 return f"{self.name}: {self.event}"
244
245 class Meta:
246 verbose_name = _("Registration")
247 verbose_name_plural = _("Registrations")
248 ordering = ("date",)
249 unique_together = (("member", "event"),)
250
[end of website/events/models/event_registration.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/events/models/event_registration.py b/website/events/models/event_registration.py
--- a/website/events/models/event_registration.py
+++ b/website/events/models/event_registration.py
@@ -1,7 +1,7 @@
from django.core import validators
from django.core.exceptions import ValidationError
from django.db import models
-from django.db.models import Count, F, Q
+from django.db.models import Case, Count, F, Q, When
from django.db.models.functions import Greatest, NullIf
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
@@ -115,23 +115,30 @@
return self.date_cancelled is None
queue_position = AnnotationProperty(
- # Get queue position by counting amount of registrations with lower date and in case of same date lower id
- # Subsequently cast to None if this is 0 or lower, in which case it isn't in the queue
- NullIf(
- Greatest(
- Count(
- "event__eventregistration",
- filter=Q(event__eventregistration__date_cancelled=None)
- & (
- Q(event__eventregistration__date__lt=F("date"))
- | Q(event__eventregistration__id__lte=F("id"))
- & Q(event__eventregistration__date__exact=F("date"))
+ Case(
+ # Get queue position by counting amount of registrations with lower date and in case of same date lower id
+ # Subsequently cast to None if this is 0 or lower, in which case it isn't in the queue
+ # If the current registration is cancelled, also force it to None.
+ When(
+ date_cancelled=None,
+ then=NullIf(
+ Greatest(
+ Count(
+ "event__eventregistration",
+ filter=Q(event__eventregistration__date_cancelled=None)
+ & (
+ Q(event__eventregistration__date__lt=F("date"))
+ | Q(event__eventregistration__id__lte=F("id"))
+ & Q(event__eventregistration__date__exact=F("date"))
+ ),
+ )
+ - F("event__max_participants"),
+ 0,
),
- )
- - F("event__max_participants"),
- 0,
+ 0,
+ ),
),
- 0,
+ default=None,
)
)
| {"golden_diff": "diff --git a/website/events/models/event_registration.py b/website/events/models/event_registration.py\n--- a/website/events/models/event_registration.py\n+++ b/website/events/models/event_registration.py\n@@ -1,7 +1,7 @@\n from django.core import validators\n from django.core.exceptions import ValidationError\n from django.db import models\n-from django.db.models import Count, F, Q\n+from django.db.models import Case, Count, F, Q, When\n from django.db.models.functions import Greatest, NullIf\n from django.utils import timezone\n from django.utils.translation import gettext_lazy as _\n@@ -115,23 +115,30 @@\n return self.date_cancelled is None\n \n queue_position = AnnotationProperty(\n- # Get queue position by counting amount of registrations with lower date and in case of same date lower id\n- # Subsequently cast to None if this is 0 or lower, in which case it isn't in the queue\n- NullIf(\n- Greatest(\n- Count(\n- \"event__eventregistration\",\n- filter=Q(event__eventregistration__date_cancelled=None)\n- & (\n- Q(event__eventregistration__date__lt=F(\"date\"))\n- | Q(event__eventregistration__id__lte=F(\"id\"))\n- & Q(event__eventregistration__date__exact=F(\"date\"))\n+ Case(\n+ # Get queue position by counting amount of registrations with lower date and in case of same date lower id\n+ # Subsequently cast to None if this is 0 or lower, in which case it isn't in the queue\n+ # If the current registration is cancelled, also force it to None.\n+ When(\n+ date_cancelled=None,\n+ then=NullIf(\n+ Greatest(\n+ Count(\n+ \"event__eventregistration\",\n+ filter=Q(event__eventregistration__date_cancelled=None)\n+ & (\n+ Q(event__eventregistration__date__lt=F(\"date\"))\n+ | Q(event__eventregistration__id__lte=F(\"id\"))\n+ & Q(event__eventregistration__date__exact=F(\"date\"))\n+ ),\n+ )\n+ - F(\"event__max_participants\"),\n+ 0,\n ),\n- )\n- - F(\"event__max_participants\"),\n- 0,\n+ 0,\n+ ),\n ),\n- 0,\n+ default=None,\n )\n )\n", "issue": "Canceled event registrations are marked as queued\n### Describe the bug\r\nAfter canceling a registration, \r\n\r\n### How to reproduce\r\nSteps to reproduce the behaviour:\r\n1. Go to https://staging.thalia.nu/api/v2/admin/events/183/registrations/?limit=999999999&ordering=-date_cancelled&cancelled=true&queued=true\r\n2. Go to https://staging.thalia.nu/api/v2/admin/events/183/registrations/?limit=999999999&ordering=-date_cancelled&cancelled=true&queued=false\r\n3. Notice that the canceled registrations are only listed under `queued=true`, and that the queue position is 3\r\n4. Go to https://staging.thalia.nu/api/v2/admin/events/183/registrations/?limit=999999999&ordering=-date_cancelled&cancelled=false&queued=true\r\n5. Notice that the queued position 3 already exists.\r\n\r\n### Expected behaviour\r\nCanceled registrations should show up as canceled and not in queue, as they are not in the queue\r\n\r\n### Screenshots\r\nIf applicable, add screenshots to help explain your problem.\r\n\r\n### Additional context\r\nI think the reason they are listed as queued is because the queue position is set. 
Ideally this field would be nulled or set to a special value like -1.\r\n\n", "before_files": [{"content": "from django.core import validators\nfrom django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.db.models import Count, F, Q\nfrom django.db.models.functions import Greatest, NullIf\nfrom django.utils import timezone\nfrom django.utils.translation import gettext_lazy as _\n\nfrom queryable_properties.managers import QueryablePropertiesManager\nfrom queryable_properties.properties import AnnotationProperty\n\nfrom events import emails\nfrom payments.models import PaymentAmountField\n\nfrom .event import Event\n\n\ndef registration_member_choices_limit():\n \"\"\"Define queryset filters to only include current members.\"\"\"\n return Q(membership__until__isnull=True) | Q(\n membership__until__gt=timezone.now().date()\n )\n\n\nclass EventRegistration(models.Model):\n \"\"\"Describes a registration for an Event.\"\"\"\n\n objects = QueryablePropertiesManager()\n\n event = models.ForeignKey(Event, models.CASCADE)\n\n member = models.ForeignKey(\n \"members.Member\",\n models.CASCADE,\n blank=True,\n null=True,\n )\n\n name = models.CharField(\n _(\"name\"),\n max_length=50,\n help_text=_(\"Use this for non-members\"),\n null=True,\n blank=True,\n )\n\n alt_email = models.EmailField(\n _(\"email\"),\n help_text=_(\"Email address for non-members\"),\n max_length=254,\n null=True,\n blank=True,\n )\n\n alt_phone_number = models.CharField(\n max_length=20,\n verbose_name=_(\"Phone number\"),\n help_text=_(\"Phone number for non-members\"),\n validators=[\n validators.RegexValidator(\n regex=r\"^\\+?\\d+$\",\n message=_(\"Please enter a valid phone number\"),\n )\n ],\n null=True,\n blank=True,\n )\n\n date = models.DateTimeField(_(\"registration date\"), default=timezone.now)\n date_cancelled = models.DateTimeField(_(\"cancellation date\"), null=True, blank=True)\n\n present = models.BooleanField(\n _(\"present\"),\n default=False,\n )\n\n special_price = PaymentAmountField(\n verbose_name=_(\"special price\"),\n blank=True,\n null=True,\n validators=[validators.MinValueValidator(0)],\n )\n\n remarks = models.TextField(_(\"remarks\"), null=True, blank=True)\n\n payment = models.OneToOneField(\n \"payments.Payment\",\n related_name=\"events_registration\",\n on_delete=models.SET_NULL,\n blank=True,\n null=True,\n )\n\n @property\n def phone_number(self):\n if self.member:\n return self.member.profile.phone_number\n return self.alt_phone_number\n\n @property\n def email(self):\n if self.member:\n return self.member.email\n return self.alt_email\n\n @property\n def information_fields(self):\n fields = self.event.registrationinformationfield_set.all()\n return [\n {\"field\": field, \"value\": field.get_value_for(self)} for field in fields\n ]\n\n @property\n def is_registered(self):\n return self.date_cancelled is None\n\n queue_position = AnnotationProperty(\n # Get queue position by counting amount of registrations with lower date and in case of same date lower id\n # Subsequently cast to None if this is 0 or lower, in which case it isn't in the queue\n NullIf(\n Greatest(\n Count(\n \"event__eventregistration\",\n filter=Q(event__eventregistration__date_cancelled=None)\n & (\n Q(event__eventregistration__date__lt=F(\"date\"))\n | Q(event__eventregistration__id__lte=F(\"id\"))\n & Q(event__eventregistration__date__exact=F(\"date\"))\n ),\n )\n - F(\"event__max_participants\"),\n 0,\n ),\n 0,\n )\n )\n\n @property\n def is_invited(self):\n return 
self.is_registered and not self.queue_position\n\n def is_external(self):\n return bool(self.name)\n\n def is_late_cancellation(self):\n # First check whether or not the user cancelled\n # If the user cancelled then check if this was after the deadline\n # And if there is a max participants number:\n # do a complex check to calculate if this user was on\n # the waiting list at the time of cancellation, since\n # you shouldn't need to pay the costs of something\n # you weren't even able to go to.\n return (\n self.date_cancelled\n and self.event.cancel_deadline\n and self.date_cancelled > self.event.cancel_deadline\n and (\n self.event.max_participants is None\n or self.event.eventregistration_set.filter(\n (\n Q(date_cancelled__gte=self.date_cancelled)\n | Q(date_cancelled=None)\n )\n & Q(date__lte=self.date)\n ).count()\n < self.event.max_participants\n )\n )\n\n def is_paid(self):\n return self.payment\n\n @property\n def payment_amount(self):\n return self.event.price if not self.special_price else self.special_price\n\n def would_cancel_after_deadline(self):\n now = timezone.now()\n if not self.event.registration_required:\n return False\n return not self.queue_position and now >= self.event.cancel_deadline\n\n def clean(self):\n errors = {}\n if (self.member is None and not self.name) or (self.member and self.name):\n errors.update(\n {\n \"member\": _(\"Either specify a member or a name\"),\n \"name\": _(\"Either specify a member or a name\"),\n }\n )\n if self.member and self.alt_email:\n errors.update(\n {\"alt_email\": _(\"Email should only be specified for non-members\")}\n )\n if self.member and self.alt_phone_number:\n errors.update(\n {\n \"alt_phone_number\": _(\n \"Phone number should only be specified for non-members\"\n )\n }\n )\n if (\n self.payment\n and self.special_price\n and self.special_price != self.payment.amount\n ):\n errors.update(\n {\n \"special_price\": _(\n \"Cannot change price of already paid registration\"\n ),\n }\n )\n\n if errors:\n raise ValidationError(errors)\n\n def save(self, **kwargs):\n self.full_clean()\n\n created = self.pk is None\n super().save(**kwargs)\n\n if (\n created\n and self.is_registered\n and self.email\n and self.event.registration_required\n ):\n if (\n self.member is not None\n and not self.member.profile.receive_registration_confirmation\n ):\n return # Don't send email if the user doesn't want them.\n\n emails.notify_registration(self)\n\n def __str__(self):\n if self.member:\n return f\"{self.member.get_full_name()}: {self.event}\"\n return f\"{self.name}: {self.event}\"\n\n class Meta:\n verbose_name = _(\"Registration\")\n verbose_name_plural = _(\"Registrations\")\n ordering = (\"date\",)\n unique_together = ((\"member\", \"event\"),)\n", "path": "website/events/models/event_registration.py"}]} | 2,960 | 526 |
gh_patches_debug_39525 | rasdani/github-patches | git_diff | lnbits__lnbits-836 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Poetry does not gracefully shutdown on CTRL+C
This is a bit critical, bad things can happen if LNbits is just killed without finishing off all routines.
Poetry with `poetry run lnbits`:
<img width="700" alt="image" src="https://user-images.githubusercontent.com/93376500/182562297-6850567b-6fb3-4003-ac9c-317d92392b04.png">
Running lnbits using `./venv/bin/uvicorn lnbits.__main__:app`:
<img width="671" alt="image" src="https://user-images.githubusercontent.com/93376500/182562341-b4d56bfa-bf4f-4981-84e6-06678923439f.png">
</issue>
<code>
[start of lnbits/server.py]
1 import click
2 import uvicorn
3
4
5 @click.command()
6 @click.option("--port", default="5000", help="Port to run LNBits on")
7 @click.option("--host", default="127.0.0.1", help="Host to run LNBits on")
8 def main(port, host):
9 """Launched with `poetry run lnbits` at root level"""
10 uvicorn.run("lnbits.__main__:app", port=port, host=host)
11
12
13 if __name__ == "__main__":
14 main()
15
16 # def main():
17 # """Launched with `poetry run start` at root level"""
18 # uvicorn.run("lnbits.__main__:app")
19
[end of lnbits/server.py]
[start of build.py]
1 import warnings
2 import subprocess
3 import glob
4 import os
5 from os import path
6 from typing import Any, List, NamedTuple, Optional
7 from pathlib import Path
8
9 LNBITS_PATH = path.dirname(path.realpath(__file__)) + "/lnbits"
10
11 def get_js_vendored(prefer_minified: bool = False) -> List[str]:
12 paths = get_vendored(".js", prefer_minified)
13
14 def sorter(key: str):
15 if "moment@" in key:
16 return 1
17 if "vue@" in key:
18 return 2
19 if "vue-router@" in key:
20 return 3
21 if "polyfills" in key:
22 return 4
23 return 9
24
25 return sorted(paths, key=sorter)
26
27
28 def get_css_vendored(prefer_minified: bool = False) -> List[str]:
29 paths = get_vendored(".css", prefer_minified)
30
31 def sorter(key: str):
32 if "quasar@" in key:
33 return 1
34 if "vue@" in key:
35 return 2
36 if "chart.js@" in key:
37 return 100
38 return 9
39
40 return sorted(paths, key=sorter)
41
42
43 def get_vendored(ext: str, prefer_minified: bool = False) -> List[str]:
44 paths: List[str] = []
45 for path in glob.glob(
46 os.path.join(LNBITS_PATH, "static/vendor/**"), recursive=True
47 ):
48 if path.endswith(".min" + ext):
49 # path is minified
50 unminified = path.replace(".min" + ext, ext)
51 if prefer_minified:
52 paths.append(path)
53 if unminified in paths:
54 paths.remove(unminified)
55 elif unminified not in paths:
56 paths.append(path)
57
58 elif path.endswith(ext):
59 # path is not minified
60 minified = path.replace(ext, ".min" + ext)
61 if not prefer_minified:
62 paths.append(path)
63 if minified in paths:
64 paths.remove(minified)
65 elif minified not in paths:
66 paths.append(path)
67
68 return sorted(paths)
69
70
71 def url_for_vendored(abspath: str) -> str:
72 return "/" + os.path.relpath(abspath, LNBITS_PATH)
73
74 def transpile_scss():
75 with warnings.catch_warnings():
76 warnings.simplefilter("ignore")
77 from scss.compiler import compile_string # type: ignore
78
79 with open(os.path.join(LNBITS_PATH, "static/scss/base.scss")) as scss:
80 with open(os.path.join(LNBITS_PATH, "static/css/base.css"), "w") as css:
81 css.write(compile_string(scss.read()))
82
83 def bundle_vendored():
84 for getfiles, outputpath in [
85 (get_js_vendored, os.path.join(LNBITS_PATH, "static/bundle.js")),
86 (get_css_vendored, os.path.join(LNBITS_PATH, "static/bundle.css")),
87 ]:
88 output = ""
89 for path in getfiles():
90 with open(path) as f:
91 output += "/* " + url_for_vendored(path) + " */\n" + f.read() + ";\n"
92 with open(outputpath, "w") as f:
93 f.write(output)
94
95
96 def build():
97 transpile_scss()
98 bundle_vendored()
99 # root = Path("lnbits/static/foo")
100 # root.mkdir(parents=True)
101 # root.joinpath("example.css").write_text("")
102
103 if __name__ == "__main__":
104 build()
105
106 #def build(setup_kwargs):
107 # """Build """
108 # transpile_scss()
109 # bundle_vendored()
110 # subprocess.run(["ls", "-la", "./lnbits/static"])
111
[end of build.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/build.py b/build.py
--- a/build.py
+++ b/build.py
@@ -8,6 +8,7 @@
LNBITS_PATH = path.dirname(path.realpath(__file__)) + "/lnbits"
+
def get_js_vendored(prefer_minified: bool = False) -> List[str]:
paths = get_vendored(".js", prefer_minified)
@@ -71,6 +72,7 @@
def url_for_vendored(abspath: str) -> str:
return "/" + os.path.relpath(abspath, LNBITS_PATH)
+
def transpile_scss():
with warnings.catch_warnings():
warnings.simplefilter("ignore")
@@ -80,6 +82,7 @@
with open(os.path.join(LNBITS_PATH, "static/css/base.css"), "w") as css:
css.write(compile_string(scss.read()))
+
def bundle_vendored():
for getfiles, outputpath in [
(get_js_vendored, os.path.join(LNBITS_PATH, "static/bundle.js")),
@@ -96,15 +99,7 @@
def build():
transpile_scss()
bundle_vendored()
-# root = Path("lnbits/static/foo")
-# root.mkdir(parents=True)
-# root.joinpath("example.css").write_text("")
-if __name__ == "__main__":
- build()
-#def build(setup_kwargs):
-# """Build """
-# transpile_scss()
-# bundle_vendored()
-# subprocess.run(["ls", "-la", "./lnbits/static"])
+if __name__ == "__main__":
+ build()
diff --git a/lnbits/server.py b/lnbits/server.py
--- a/lnbits/server.py
+++ b/lnbits/server.py
@@ -1,18 +1,45 @@
+import time
+
import click
import uvicorn
+from lnbits.settings import HOST, PORT
+
[email protected]()
[email protected]("--port", default="5000", help="Port to run LNBits on")
[email protected]("--host", default="127.0.0.1", help="Host to run LNBits on")
-def main(port, host):
[email protected](
+ context_settings=dict(
+ ignore_unknown_options=True,
+ allow_extra_args=True,
+ )
+)
[email protected]("--port", default=PORT, help="Port to listen on")
[email protected]("--host", default=HOST, help="Host to run LNBits on")
[email protected]("--ssl-keyfile", default=None, help="Path to SSL keyfile")
[email protected]("--ssl-certfile", default=None, help="Path to SSL certificate")
[email protected]_context
+def main(ctx, port: int, host: str, ssl_keyfile: str, ssl_certfile: str):
"""Launched with `poetry run lnbits` at root level"""
- uvicorn.run("lnbits.__main__:app", port=port, host=host)
+ # this beautiful beast parses all command line arguments and passes them to the uvicorn server
+ d = dict(
+ [
+ (
+ item[0].strip("--").replace("-", "_"),
+ int(item[1]) if item[1].isdigit() else item[1],
+ )
+ for item in zip(*[iter(ctx.args)] * 2)
+ ]
+ )
+ config = uvicorn.Config(
+ "lnbits.__main__:app",
+ port=port,
+ host=host,
+ ssl_keyfile=ssl_keyfile,
+ ssl_certfile=ssl_certfile,
+ **d
+ )
+ server = uvicorn.Server(config)
+ server.run()
if __name__ == "__main__":
main()
-
-# def main():
-# """Launched with `poetry run start` at root level"""
-# uvicorn.run("lnbits.__main__:app")
| {"golden_diff": "diff --git a/build.py b/build.py\n--- a/build.py\n+++ b/build.py\n@@ -8,6 +8,7 @@\n \n LNBITS_PATH = path.dirname(path.realpath(__file__)) + \"/lnbits\"\n \n+\n def get_js_vendored(prefer_minified: bool = False) -> List[str]:\n paths = get_vendored(\".js\", prefer_minified)\n \n@@ -71,6 +72,7 @@\n def url_for_vendored(abspath: str) -> str:\n return \"/\" + os.path.relpath(abspath, LNBITS_PATH)\n \n+\n def transpile_scss():\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n@@ -80,6 +82,7 @@\n with open(os.path.join(LNBITS_PATH, \"static/css/base.css\"), \"w\") as css:\n css.write(compile_string(scss.read()))\n \n+\n def bundle_vendored():\n for getfiles, outputpath in [\n (get_js_vendored, os.path.join(LNBITS_PATH, \"static/bundle.js\")),\n@@ -96,15 +99,7 @@\n def build():\n transpile_scss()\n bundle_vendored()\n-# root = Path(\"lnbits/static/foo\")\n-# root.mkdir(parents=True)\n-# root.joinpath(\"example.css\").write_text(\"\")\n \n-if __name__ == \"__main__\":\n- build()\n \n-#def build(setup_kwargs):\n-# \"\"\"Build \"\"\"\n-# transpile_scss()\n-# bundle_vendored()\n-# subprocess.run([\"ls\", \"-la\", \"./lnbits/static\"])\n+if __name__ == \"__main__\":\n+ build()\ndiff --git a/lnbits/server.py b/lnbits/server.py\n--- a/lnbits/server.py\n+++ b/lnbits/server.py\n@@ -1,18 +1,45 @@\n+import time\n+\n import click\n import uvicorn\n \n+from lnbits.settings import HOST, PORT\n+\n \[email protected]()\[email protected](\"--port\", default=\"5000\", help=\"Port to run LNBits on\")\[email protected](\"--host\", default=\"127.0.0.1\", help=\"Host to run LNBits on\")\n-def main(port, host):\[email protected](\n+ context_settings=dict(\n+ ignore_unknown_options=True,\n+ allow_extra_args=True,\n+ )\n+)\[email protected](\"--port\", default=PORT, help=\"Port to listen on\")\[email protected](\"--host\", default=HOST, help=\"Host to run LNBits on\")\[email protected](\"--ssl-keyfile\", default=None, help=\"Path to SSL keyfile\")\[email protected](\"--ssl-certfile\", default=None, help=\"Path to SSL certificate\")\[email protected]_context\n+def main(ctx, port: int, host: str, ssl_keyfile: str, ssl_certfile: str):\n \"\"\"Launched with `poetry run lnbits` at root level\"\"\"\n- uvicorn.run(\"lnbits.__main__:app\", port=port, host=host)\n+ # this beautiful beast parses all command line arguments and passes them to the uvicorn server\n+ d = dict(\n+ [\n+ (\n+ item[0].strip(\"--\").replace(\"-\", \"_\"),\n+ int(item[1]) if item[1].isdigit() else item[1],\n+ )\n+ for item in zip(*[iter(ctx.args)] * 2)\n+ ]\n+ )\n+ config = uvicorn.Config(\n+ \"lnbits.__main__:app\",\n+ port=port,\n+ host=host,\n+ ssl_keyfile=ssl_keyfile,\n+ ssl_certfile=ssl_certfile,\n+ **d\n+ )\n+ server = uvicorn.Server(config)\n+ server.run()\n \n \n if __name__ == \"__main__\":\n main()\n-\n-# def main():\n-# \"\"\"Launched with `poetry run start` at root level\"\"\"\n-# uvicorn.run(\"lnbits.__main__:app\")\n", "issue": "Poetry does not gracefully shutdown on CTRL+C\nThis is a bit critical, bad things can happen if LNbits is just killed without finishing off all routines.\r\n\r\nPoetry with `poetry run lnbits`:\r\n<img width=\"700\" alt=\"image\" src=\"https://user-images.githubusercontent.com/93376500/182562297-6850567b-6fb3-4003-ac9c-317d92392b04.png\">\r\n\r\nRunning lnbits using `./venv/bin/uvicorn lnbits.__main__:app`:\r\n<img width=\"671\" alt=\"image\" src=\"https://user-images.githubusercontent.com/93376500/182562341-b4d56bfa-bf4f-4981-84e6-06678923439f.png\">\r\n\n", "before_files": 
[{"content": "import click\nimport uvicorn\n\n\[email protected]()\[email protected](\"--port\", default=\"5000\", help=\"Port to run LNBits on\")\[email protected](\"--host\", default=\"127.0.0.1\", help=\"Host to run LNBits on\")\ndef main(port, host):\n \"\"\"Launched with `poetry run lnbits` at root level\"\"\"\n uvicorn.run(\"lnbits.__main__:app\", port=port, host=host)\n\n\nif __name__ == \"__main__\":\n main()\n\n# def main():\n# \"\"\"Launched with `poetry run start` at root level\"\"\"\n# uvicorn.run(\"lnbits.__main__:app\")\n", "path": "lnbits/server.py"}, {"content": "import warnings\nimport subprocess\nimport glob\nimport os\nfrom os import path\nfrom typing import Any, List, NamedTuple, Optional\nfrom pathlib import Path\n\nLNBITS_PATH = path.dirname(path.realpath(__file__)) + \"/lnbits\"\n\ndef get_js_vendored(prefer_minified: bool = False) -> List[str]:\n paths = get_vendored(\".js\", prefer_minified)\n\n def sorter(key: str):\n if \"moment@\" in key:\n return 1\n if \"vue@\" in key:\n return 2\n if \"vue-router@\" in key:\n return 3\n if \"polyfills\" in key:\n return 4\n return 9\n\n return sorted(paths, key=sorter)\n\n\ndef get_css_vendored(prefer_minified: bool = False) -> List[str]:\n paths = get_vendored(\".css\", prefer_minified)\n\n def sorter(key: str):\n if \"quasar@\" in key:\n return 1\n if \"vue@\" in key:\n return 2\n if \"chart.js@\" in key:\n return 100\n return 9\n\n return sorted(paths, key=sorter)\n\n\ndef get_vendored(ext: str, prefer_minified: bool = False) -> List[str]:\n paths: List[str] = []\n for path in glob.glob(\n os.path.join(LNBITS_PATH, \"static/vendor/**\"), recursive=True\n ):\n if path.endswith(\".min\" + ext):\n # path is minified\n unminified = path.replace(\".min\" + ext, ext)\n if prefer_minified:\n paths.append(path)\n if unminified in paths:\n paths.remove(unminified)\n elif unminified not in paths:\n paths.append(path)\n\n elif path.endswith(ext):\n # path is not minified\n minified = path.replace(ext, \".min\" + ext)\n if not prefer_minified:\n paths.append(path)\n if minified in paths:\n paths.remove(minified)\n elif minified not in paths:\n paths.append(path)\n\n return sorted(paths)\n\n\ndef url_for_vendored(abspath: str) -> str:\n return \"/\" + os.path.relpath(abspath, LNBITS_PATH)\n\ndef transpile_scss():\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n from scss.compiler import compile_string # type: ignore\n\n with open(os.path.join(LNBITS_PATH, \"static/scss/base.scss\")) as scss:\n with open(os.path.join(LNBITS_PATH, \"static/css/base.css\"), \"w\") as css:\n css.write(compile_string(scss.read()))\n\ndef bundle_vendored():\n for getfiles, outputpath in [\n (get_js_vendored, os.path.join(LNBITS_PATH, \"static/bundle.js\")),\n (get_css_vendored, os.path.join(LNBITS_PATH, \"static/bundle.css\")),\n ]:\n output = \"\"\n for path in getfiles():\n with open(path) as f:\n output += \"/* \" + url_for_vendored(path) + \" */\\n\" + f.read() + \";\\n\"\n with open(outputpath, \"w\") as f:\n f.write(output)\n\n\ndef build():\n transpile_scss()\n bundle_vendored()\n# root = Path(\"lnbits/static/foo\")\n# root.mkdir(parents=True)\n# root.joinpath(\"example.css\").write_text(\"\")\n\nif __name__ == \"__main__\":\n build()\n\n#def build(setup_kwargs):\n# \"\"\"Build \"\"\"\n# transpile_scss()\n# bundle_vendored()\n# subprocess.run([\"ls\", \"-la\", \"./lnbits/static\"])\n", "path": "build.py"}]} | 1,982 | 881 |
gh_patches_debug_29767 | rasdani/github-patches | git_diff | rotki__rotki-5054 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
rotki 1.26.0 + windows: Unable to open database
## Problem Definition
Premium Windows users started getting this after upgrading to 1.26.0.
It's probably related to the changes I did here: https://github.com/rotki/rotki/pull/5043, but it manifests only on Windows, as no one else saw it when using/testing on other OSes.
## Logs
```
[28/10/2022 18:14:05 W. Europe Daylight Time] ERROR rotkehlchen.greenlets Greenlet with id 3094640951536: Upload data to server died with exception: unable to open database: C:\Users\isido\AppData\Local\Temp\tmpp2jyr7sq.db.
Exception Name: <class 'pysqlcipher3.dbapi2.OperationalError'>
Exception Info: unable to open database: C:\Users\isido\AppData\Local\Temp\tmpp2jyr7sq.db
Traceback:
File "src\\gevent\\greenlet.py", line 908, in gevent._gevent_cgreenlet.Greenlet.run
File "rotkehlchen\premium\sync.py", line 143, in maybe_upload_data_to_server
File "rotkehlchen\data_handler.py", line 198, in compress_and_encrypt_db
File "rotkehlchen\db\dbhandler.py", line 483, in export_unencrypted
File "rotkehlchen\db\drivers\gevent.py", line 264, in executescript
[28/10/2022 18:14:05 W. Europe Daylight Time] ERROR rotkehlchen.user_messages Greenlet with id 3094640951536: Upload data to server died with exception: unable to open database: C:\Users\isido\AppData\Local\Temp\tmpp2jyr7sq.db. Check the logs for more details
```
### System Description
Operating system: Windows
Rotki version: 1.26.0
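For context: on Windows, a file created with `tempfile.NamedTemporaryFile(delete=True)` generally cannot be opened a second time by path while the original handle is still open, which fits the traceback above (`compress_and_encrypt_db` asks the DB driver to write into the still-open temp file). A rough sketch of the usual workaround follows; `export_unencrypted` is passed in as a stand-in for the real `DBHandler.export_unencrypted` call.

```python
# Sketch: create the temp file with delete=False, close our handle so another
# connection can open the same path on Windows, and unlink it ourselves.
import tempfile
from pathlib import Path


def export_db_bytes(export_unencrypted) -> bytes:
    """export_unencrypted is a stand-in for DBHandler.export_unencrypted."""
    with tempfile.NamedTemporaryFile(delete=False, suffix=".db") as tmpfile:
        tmp_path = Path(tmpfile.name)
    # our handle is closed here; the file remains because delete=False
    try:
        export_unencrypted(tmp_path)  # re-opens the file by path
        return tmp_path.read_bytes()
    finally:
        tmp_path.unlink()             # explicit cleanup replaces delete=True
```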
</issue>
<code>
[start of rotkehlchen/data_handler.py]
1 import base64
2 import hashlib
3 import logging
4 import shutil
5 import tempfile
6 import zlib
7 from pathlib import Path
8 from typing import Dict, List, Optional, Tuple
9
10 from rotkehlchen.assets.asset import Asset
11 from rotkehlchen.crypto import decrypt, encrypt
12 from rotkehlchen.db.dbhandler import DBHandler
13 from rotkehlchen.db.settings import ModifiableDBSettings
14 from rotkehlchen.errors.api import AuthenticationError
15 from rotkehlchen.errors.misc import SystemPermissionError
16 from rotkehlchen.logging import RotkehlchenLogsAdapter
17 from rotkehlchen.types import B64EncodedBytes, B64EncodedString
18 from rotkehlchen.user_messages import MessagesAggregator
19 from rotkehlchen.utils.misc import timestamp_to_date, ts_now
20
21 logger = logging.getLogger(__name__)
22 log = RotkehlchenLogsAdapter(logger)
23
24 BUFFERSIZE = 64 * 1024
25
26
27 class DataHandler():
28
29 def __init__(
30 self,
31 data_directory: Path,
32 msg_aggregator: MessagesAggregator,
33 sql_vm_instructions_cb: int,
34 ):
35 self.logged_in = False
36 self.data_directory = data_directory
37 self.username = 'no_user'
38 self.password = ''
39 self.msg_aggregator = msg_aggregator
40 self.sql_vm_instructions_cb = sql_vm_instructions_cb
41
42 def logout(self) -> None:
43 if self.logged_in:
44 self.username = 'no_user'
45 self.password = ''
46 self.user_data_dir: Optional[Path] = None
47 db = getattr(self, 'db', None)
48 if db is not None:
49 with self.db.conn.read_ctx() as cursor:
50 self.db.update_owned_assets_in_globaldb(cursor)
51 self.db.logout()
52 self.logged_in = False
53
54 def change_password(self, new_password: str) -> bool:
55 success: bool = False
56
57 if self.logged_in:
58 success = self.db.change_password(new_password)
59 self.password = new_password
60
61 return success
62
63 def unlock(
64 self,
65 username: str,
66 password: str,
67 create_new: bool,
68 initial_settings: Optional[ModifiableDBSettings] = None,
69 ) -> Path:
70 """Unlocks a user, either logging them in or creating a new user
71
72 May raise:
73 - SystemPermissionError if there are permission errors when accessing the DB
74 or a directory in the user's filesystem
75 - AuthenticationError if the given user does not exist, or if
76 sqlcipher version problems are detected
77 - DBUpgradeError if the rotki DB version is newer than the software or
78 there is a DB upgrade and there is an error or if the version is older
79 than the one supported.
80 """
81 user_data_dir = self.data_directory / username
82 if create_new:
83 try:
84 if (user_data_dir / 'rotkehlchen.db').exists():
85 raise AuthenticationError(
86 f'User {username} already exists. User data dir: {user_data_dir}',
87 )
88
89 user_data_dir.mkdir(exist_ok=True)
90 except PermissionError as e:
91 raise SystemPermissionError(
92 f'Failed to create directory for user: {str(e)}',
93 ) from e
94
95 else:
96 try:
97 if not user_data_dir.exists():
98 raise AuthenticationError('User {} does not exist'.format(username))
99
100 if not (user_data_dir / 'rotkehlchen.db').exists():
101 raise PermissionError
102
103 except PermissionError as e:
104 # This is bad. User directory exists but database is missing.
105 # Or either DB or user directory can't be accessed due to permissions
106 # Make a backup of the directory that user should probably remove
107 # on their own. At the same time delete the directory so that a new
108 # user account can be created
109 shutil.move(
110 user_data_dir,
111 self.data_directory / f'auto_backup_{username}_{ts_now()}',
112 )
113
114 raise SystemPermissionError(
115 'User {} exists but DB is missing. Somehow must have been manually '
116 'deleted or is corrupt or access permissions do not allow reading. '
117 'Please recreate the user account. '
118 'A backup of the user directory was created.'.format(username),
119 ) from e
120
121 self.db: DBHandler = DBHandler(
122 user_data_dir=user_data_dir,
123 password=password,
124 msg_aggregator=self.msg_aggregator,
125 initial_settings=initial_settings,
126 sql_vm_instructions_cb=self.sql_vm_instructions_cb,
127 )
128 self.user_data_dir = user_data_dir
129 self.logged_in = True
130 self.username = username
131 self.password = password
132 return user_data_dir
133
134 def add_ignored_assets(self, assets: List[Asset]) -> Tuple[Optional[List[Asset]], str]:
135 """Adds ignored assets to the DB.
136
137 If any of the given assets is already in the DB the function does nothing
138 and returns an error message.
139 """
140 with self.db.conn.read_ctx() as cursor:
141 ignored_assets = self.db.get_ignored_assets(cursor)
142 for asset in assets:
143 if asset in ignored_assets:
144 msg = f'{asset.identifier} is already in ignored assets'
145 return None, msg
146
147 with self.db.user_write() as write_cursor:
148 for asset in assets:
149 self.db.add_to_ignored_assets(write_cursor=write_cursor, asset=asset)
150
151 return self.db.get_ignored_assets(cursor), ''
152
153 def remove_ignored_assets(self, assets: List[Asset]) -> Tuple[Optional[List[Asset]], str]:
154 """Removes ignored assets from the DB.
155
156 If any of the given assets is not in the DB the call function does nothing
157 and returns an error message.
158 """
159 with self.db.conn.read_ctx() as cursor:
160 ignored_assets = self.db.get_ignored_assets(cursor)
161 for asset in assets:
162 if asset not in ignored_assets:
163 msg = f'{asset.identifier} is not in ignored assets'
164 return None, msg
165
166 with self.db.user_write() as write_cursor:
167 for asset in assets:
168 self.db.remove_from_ignored_assets(write_cursor=write_cursor, asset=asset)
169
170 return self.db.get_ignored_assets(cursor), ''
171
172 def get_users(self) -> Dict[str, str]:
173 """Returns a dict with all users in the system.
174
175 Each key is a user's name and the value is denoting whether that
176 particular user is logged in or not
177 """
178 users = {}
179 for x in self.data_directory.iterdir():
180 try:
181 if x.is_dir() and (x / 'rotkehlchen.db').exists():
182 users[x.stem] = 'loggedin' if x.stem == self.username else 'loggedout'
183 except PermissionError:
184 # ignore directories that can't be accessed
185 continue
186
187 return users
188
189 def compress_and_encrypt_db(self, password: str) -> Tuple[B64EncodedBytes, str]:
190 """Decrypt the DB, dump in temporary plaintextdb, compress it,
191 and then re-encrypt it
192
193 Returns a b64 encoded binary blob"""
194 log.info('Compress and encrypt DB')
195 compressor = zlib.compressobj(level=9)
196 with tempfile.NamedTemporaryFile(delete=True, suffix='.db') as tempdbfile:
197 tempdb = Path(tempdbfile.name)
198 self.db.export_unencrypted(tempdb)
199 source_data = bytearray()
200 compressed_data = bytearray()
201 with open(tempdb, 'rb') as src_f:
202 block = src_f.read(BUFFERSIZE)
203 while block:
204 source_data += block
205 compressed_data += compressor.compress(block)
206 block = src_f.read(BUFFERSIZE)
207
208 compressed_data += compressor.flush()
209
210 original_data_hash = base64.b64encode(
211 hashlib.sha256(source_data).digest(),
212 ).decode()
213 encrypted_data = encrypt(password.encode(), bytes(compressed_data))
214 return B64EncodedBytes(encrypted_data.encode()), original_data_hash
215
216 def decompress_and_decrypt_db(self, password: str, encrypted_data: B64EncodedString) -> None:
217 """Decrypt and decompress the encrypted data we receive from the server
218
219 If successful then replace our local Database
220
221 May Raise:
222 - UnableToDecryptRemoteData due to decrypt()
223 - DBUpgradeError if the rotki DB version is newer than the software or
224 there is a DB upgrade and there is an error or if the version is older
225 than the one supported.
226 - SystemPermissionError if the DB file permissions are not correct
227 """
228 log.info('Decompress and decrypt DB')
229 # First make a backup of the DB we are about to replace
230 date = timestamp_to_date(ts=ts_now(), formatstr='%Y_%m_%d_%H_%M_%S', treat_as_local=True)
231 shutil.copyfile(
232 self.data_directory / self.username / 'rotkehlchen.db',
233 self.data_directory / self.username / f'rotkehlchen_db_{date}.backup',
234 )
235
236 decrypted_data = decrypt(password.encode(), encrypted_data)
237 decompressed_data = zlib.decompress(decrypted_data)
238 self.db.import_unencrypted(decompressed_data, password)
239
[end of rotkehlchen/data_handler.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/rotkehlchen/data_handler.py b/rotkehlchen/data_handler.py
--- a/rotkehlchen/data_handler.py
+++ b/rotkehlchen/data_handler.py
@@ -193,12 +193,13 @@
Returns a b64 encoded binary blob"""
log.info('Compress and encrypt DB')
compressor = zlib.compressobj(level=9)
- with tempfile.NamedTemporaryFile(delete=True, suffix='.db') as tempdbfile:
- tempdb = Path(tempdbfile.name)
- self.db.export_unencrypted(tempdb)
+ with tempfile.NamedTemporaryFile(delete=False, suffix='.db') as tempdbfile:
+ tempdbpath = Path(tempdbfile.name)
+ tempdbfile.close() # close the file to allow re-opening by export_unencrypted in windows https://github.com/rotki/rotki/issues/5051 # noqa: E501
+ self.db.export_unencrypted(tempdbpath)
source_data = bytearray()
compressed_data = bytearray()
- with open(tempdb, 'rb') as src_f:
+ with open(tempdbpath, 'rb') as src_f:
block = src_f.read(BUFFERSIZE)
while block:
source_data += block
@@ -211,6 +212,8 @@
hashlib.sha256(source_data).digest(),
).decode()
encrypted_data = encrypt(password.encode(), bytes(compressed_data))
+ # cleanup temp file to avoid windows problem (https://github.com/rotki/rotki/issues/5051)
+ tempdbpath.unlink()
return B64EncodedBytes(encrypted_data.encode()), original_data_hash
def decompress_and_decrypt_db(self, password: str, encrypted_data: B64EncodedString) -> None:
| {"golden_diff": "diff --git a/rotkehlchen/data_handler.py b/rotkehlchen/data_handler.py\n--- a/rotkehlchen/data_handler.py\n+++ b/rotkehlchen/data_handler.py\n@@ -193,12 +193,13 @@\n Returns a b64 encoded binary blob\"\"\"\n log.info('Compress and encrypt DB')\n compressor = zlib.compressobj(level=9)\n- with tempfile.NamedTemporaryFile(delete=True, suffix='.db') as tempdbfile:\n- tempdb = Path(tempdbfile.name)\n- self.db.export_unencrypted(tempdb)\n+ with tempfile.NamedTemporaryFile(delete=False, suffix='.db') as tempdbfile:\n+ tempdbpath = Path(tempdbfile.name)\n+ tempdbfile.close() # close the file to allow re-opening by export_unencrypted in windows https://github.com/rotki/rotki/issues/5051 # noqa: E501\n+ self.db.export_unencrypted(tempdbpath)\n source_data = bytearray()\n compressed_data = bytearray()\n- with open(tempdb, 'rb') as src_f:\n+ with open(tempdbpath, 'rb') as src_f:\n block = src_f.read(BUFFERSIZE)\n while block:\n source_data += block\n@@ -211,6 +212,8 @@\n hashlib.sha256(source_data).digest(),\n ).decode()\n encrypted_data = encrypt(password.encode(), bytes(compressed_data))\n+ # cleanup temp file to avoid windows problem (https://github.com/rotki/rotki/issues/5051)\n+ tempdbpath.unlink()\n return B64EncodedBytes(encrypted_data.encode()), original_data_hash\n \n def decompress_and_decrypt_db(self, password: str, encrypted_data: B64EncodedString) -> None:\n", "issue": "rotki 1.26.0 + windows: Unable to open database\n## Problem Definition\r\n\r\nPremium windows users started getting this after upgrading to 1.26.0.\r\n\r\nIt's probably related to the changes I did here: https://github.com/rotki/rotki/pull/5043 but manifests only in Windows as none else saw it when using/testing in other OSes.\r\n\r\n## Logs\r\n\r\n```\r\n[28/10/2022 18:14:05 W. Europe Daylight Time] ERROR rotkehlchen.greenlets Greenlet with id 3094640951536: Upload data to server died with exception: unable to open database: C:\\Users\\isido\\AppData\\Local\\Temp\\tmpp2jyr7sq.db.\r\nException Name: <class 'pysqlcipher3.dbapi2.OperationalError'>\r\nException Info: unable to open database: C:\\Users\\isido\\AppData\\Local\\Temp\\tmpp2jyr7sq.db\r\nTraceback:\r\n File \"src\\\\gevent\\\\greenlet.py\", line 908, in gevent._gevent_cgreenlet.Greenlet.run\r\n File \"rotkehlchen\\premium\\sync.py\", line 143, in maybe_upload_data_to_server\r\n File \"rotkehlchen\\data_handler.py\", line 198, in compress_and_encrypt_db\r\n File \"rotkehlchen\\db\\dbhandler.py\", line 483, in export_unencrypted\r\n File \"rotkehlchen\\db\\drivers\\gevent.py\", line 264, in executescript\r\n\r\n[28/10/2022 18:14:05 W. Europe Daylight Time] ERROR rotkehlchen.user_messages Greenlet with id 3094640951536: Upload data to server died with exception: unable to open database: C:\\Users\\isido\\AppData\\Local\\Temp\\tmpp2jyr7sq.db. 
Check the logs for more details\r\n```\r\n\r\n\r\n\r\n### System Description\r\n\r\n\r\nOperating system: Windows\r\nRotki version: 1.26.0\r\n\n", "before_files": [{"content": "import base64\nimport hashlib\nimport logging\nimport shutil\nimport tempfile\nimport zlib\nfrom pathlib import Path\nfrom typing import Dict, List, Optional, Tuple\n\nfrom rotkehlchen.assets.asset import Asset\nfrom rotkehlchen.crypto import decrypt, encrypt\nfrom rotkehlchen.db.dbhandler import DBHandler\nfrom rotkehlchen.db.settings import ModifiableDBSettings\nfrom rotkehlchen.errors.api import AuthenticationError\nfrom rotkehlchen.errors.misc import SystemPermissionError\nfrom rotkehlchen.logging import RotkehlchenLogsAdapter\nfrom rotkehlchen.types import B64EncodedBytes, B64EncodedString\nfrom rotkehlchen.user_messages import MessagesAggregator\nfrom rotkehlchen.utils.misc import timestamp_to_date, ts_now\n\nlogger = logging.getLogger(__name__)\nlog = RotkehlchenLogsAdapter(logger)\n\nBUFFERSIZE = 64 * 1024\n\n\nclass DataHandler():\n\n def __init__(\n self,\n data_directory: Path,\n msg_aggregator: MessagesAggregator,\n sql_vm_instructions_cb: int,\n ):\n self.logged_in = False\n self.data_directory = data_directory\n self.username = 'no_user'\n self.password = ''\n self.msg_aggregator = msg_aggregator\n self.sql_vm_instructions_cb = sql_vm_instructions_cb\n\n def logout(self) -> None:\n if self.logged_in:\n self.username = 'no_user'\n self.password = ''\n self.user_data_dir: Optional[Path] = None\n db = getattr(self, 'db', None)\n if db is not None:\n with self.db.conn.read_ctx() as cursor:\n self.db.update_owned_assets_in_globaldb(cursor)\n self.db.logout()\n self.logged_in = False\n\n def change_password(self, new_password: str) -> bool:\n success: bool = False\n\n if self.logged_in:\n success = self.db.change_password(new_password)\n self.password = new_password\n\n return success\n\n def unlock(\n self,\n username: str,\n password: str,\n create_new: bool,\n initial_settings: Optional[ModifiableDBSettings] = None,\n ) -> Path:\n \"\"\"Unlocks a user, either logging them in or creating a new user\n\n May raise:\n - SystemPermissionError if there are permission errors when accessing the DB\n or a directory in the user's filesystem\n - AuthenticationError if the given user does not exist, or if\n sqlcipher version problems are detected\n - DBUpgradeError if the rotki DB version is newer than the software or\n there is a DB upgrade and there is an error or if the version is older\n than the one supported.\n \"\"\"\n user_data_dir = self.data_directory / username\n if create_new:\n try:\n if (user_data_dir / 'rotkehlchen.db').exists():\n raise AuthenticationError(\n f'User {username} already exists. User data dir: {user_data_dir}',\n )\n\n user_data_dir.mkdir(exist_ok=True)\n except PermissionError as e:\n raise SystemPermissionError(\n f'Failed to create directory for user: {str(e)}',\n ) from e\n\n else:\n try:\n if not user_data_dir.exists():\n raise AuthenticationError('User {} does not exist'.format(username))\n\n if not (user_data_dir / 'rotkehlchen.db').exists():\n raise PermissionError\n\n except PermissionError as e:\n # This is bad. User directory exists but database is missing.\n # Or either DB or user directory can't be accessed due to permissions\n # Make a backup of the directory that user should probably remove\n # on their own. 
At the same time delete the directory so that a new\n # user account can be created\n shutil.move(\n user_data_dir,\n self.data_directory / f'auto_backup_{username}_{ts_now()}',\n )\n\n raise SystemPermissionError(\n 'User {} exists but DB is missing. Somehow must have been manually '\n 'deleted or is corrupt or access permissions do not allow reading. '\n 'Please recreate the user account. '\n 'A backup of the user directory was created.'.format(username),\n ) from e\n\n self.db: DBHandler = DBHandler(\n user_data_dir=user_data_dir,\n password=password,\n msg_aggregator=self.msg_aggregator,\n initial_settings=initial_settings,\n sql_vm_instructions_cb=self.sql_vm_instructions_cb,\n )\n self.user_data_dir = user_data_dir\n self.logged_in = True\n self.username = username\n self.password = password\n return user_data_dir\n\n def add_ignored_assets(self, assets: List[Asset]) -> Tuple[Optional[List[Asset]], str]:\n \"\"\"Adds ignored assets to the DB.\n\n If any of the given assets is already in the DB the function does nothing\n and returns an error message.\n \"\"\"\n with self.db.conn.read_ctx() as cursor:\n ignored_assets = self.db.get_ignored_assets(cursor)\n for asset in assets:\n if asset in ignored_assets:\n msg = f'{asset.identifier} is already in ignored assets'\n return None, msg\n\n with self.db.user_write() as write_cursor:\n for asset in assets:\n self.db.add_to_ignored_assets(write_cursor=write_cursor, asset=asset)\n\n return self.db.get_ignored_assets(cursor), ''\n\n def remove_ignored_assets(self, assets: List[Asset]) -> Tuple[Optional[List[Asset]], str]:\n \"\"\"Removes ignored assets from the DB.\n\n If any of the given assets is not in the DB the call function does nothing\n and returns an error message.\n \"\"\"\n with self.db.conn.read_ctx() as cursor:\n ignored_assets = self.db.get_ignored_assets(cursor)\n for asset in assets:\n if asset not in ignored_assets:\n msg = f'{asset.identifier} is not in ignored assets'\n return None, msg\n\n with self.db.user_write() as write_cursor:\n for asset in assets:\n self.db.remove_from_ignored_assets(write_cursor=write_cursor, asset=asset)\n\n return self.db.get_ignored_assets(cursor), ''\n\n def get_users(self) -> Dict[str, str]:\n \"\"\"Returns a dict with all users in the system.\n\n Each key is a user's name and the value is denoting whether that\n particular user is logged in or not\n \"\"\"\n users = {}\n for x in self.data_directory.iterdir():\n try:\n if x.is_dir() and (x / 'rotkehlchen.db').exists():\n users[x.stem] = 'loggedin' if x.stem == self.username else 'loggedout'\n except PermissionError:\n # ignore directories that can't be accessed\n continue\n\n return users\n\n def compress_and_encrypt_db(self, password: str) -> Tuple[B64EncodedBytes, str]:\n \"\"\"Decrypt the DB, dump in temporary plaintextdb, compress it,\n and then re-encrypt it\n\n Returns a b64 encoded binary blob\"\"\"\n log.info('Compress and encrypt DB')\n compressor = zlib.compressobj(level=9)\n with tempfile.NamedTemporaryFile(delete=True, suffix='.db') as tempdbfile:\n tempdb = Path(tempdbfile.name)\n self.db.export_unencrypted(tempdb)\n source_data = bytearray()\n compressed_data = bytearray()\n with open(tempdb, 'rb') as src_f:\n block = src_f.read(BUFFERSIZE)\n while block:\n source_data += block\n compressed_data += compressor.compress(block)\n block = src_f.read(BUFFERSIZE)\n\n compressed_data += compressor.flush()\n\n original_data_hash = base64.b64encode(\n hashlib.sha256(source_data).digest(),\n ).decode()\n encrypted_data = 
encrypt(password.encode(), bytes(compressed_data))\n return B64EncodedBytes(encrypted_data.encode()), original_data_hash\n\n def decompress_and_decrypt_db(self, password: str, encrypted_data: B64EncodedString) -> None:\n \"\"\"Decrypt and decompress the encrypted data we receive from the server\n\n If successful then replace our local Database\n\n May Raise:\n - UnableToDecryptRemoteData due to decrypt()\n - DBUpgradeError if the rotki DB version is newer than the software or\n there is a DB upgrade and there is an error or if the version is older\n than the one supported.\n - SystemPermissionError if the DB file permissions are not correct\n \"\"\"\n log.info('Decompress and decrypt DB')\n # First make a backup of the DB we are about to replace\n date = timestamp_to_date(ts=ts_now(), formatstr='%Y_%m_%d_%H_%M_%S', treat_as_local=True)\n shutil.copyfile(\n self.data_directory / self.username / 'rotkehlchen.db',\n self.data_directory / self.username / f'rotkehlchen_db_{date}.backup',\n )\n\n decrypted_data = decrypt(password.encode(), encrypted_data)\n decompressed_data = zlib.decompress(decrypted_data)\n self.db.import_unencrypted(decompressed_data, password)\n", "path": "rotkehlchen/data_handler.py"}]} | 3,593 | 399 |
gh_patches_debug_32972 | rasdani/github-patches | git_diff | cleanlab__cleanlab-980 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Revert version upper bound of termcolor dependency
## Background
In #905, an upper bound on the termcolor version was added. This was a temporary fix to pass CI on existing PRs.
https://github.com/cleanlab/cleanlab/blob/4ac43c0a2654179732cafc704e807c3c5c9ec1f1/setup.py#L108
Without the upper bound, the following tests fail on Python 3.8+
https://github.com/cleanlab/cleanlab/blob/4ac43c0a2654179732cafc704e807c3c5c9ec1f1/tests/test_token_classification.py#L134-L169
Here's a screenshot of a CI run with these tests failing:
<img width="1142" alt="image" src="https://github.com/cleanlab/cleanlab/assets/18127060/2d371cf4-0d73-49e7-8392-df3384c46a71">
It looks like the `colored` variable no longer has the
## Task
The version constraint on termcolor needs to be reverted to
```python
"termcolor>=2.0.0",
```
but the affected unit tests need to pass as well when the new release of termcolor ([version 2.4.0](https://github.com/termcolor/termcolor/releases/tag/2.4.0)) is installed.
</issue>
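Editor's note: the affected tests appear to compare strings built with termcolor's `colored` helper against hard-coded expectations, so a formatting change in a new termcolor release breaks the assertions. A minimal, hedged sketch of a version-tolerant assertion — the `highlight` helper and the test name below are illustrative, not cleanlab's actual code:

```python
from termcolor import colored

def highlight(token: str) -> str:
    # Hypothetical stand-in for the library call under test.
    return colored(token, color="red")

def test_highlight_tracks_installed_termcolor():
    # Build the expectation with the same termcolor call instead of a
    # hard-coded "\x1b[31m..." literal, so the assertion follows whatever
    # escape sequences the installed termcolor version emits.
    assert highlight("sentence") == colored("sentence", color="red")
```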
<code>
[start of setup.py]
1 from setuptools import setup, find_packages
2 from setuptools.command.egg_info import egg_info
3
4 # To use a consistent encoding
5 from codecs import open
6 from os import path
7
8
9 class egg_info_ex(egg_info):
10 """Includes license file into `.egg-info` folder."""
11
12 def run(self):
13 # don't duplicate license into `.egg-info` when building a distribution
14 if not self.distribution.have_run.get("install", True):
15 # `install` command is in progress, copy license
16 self.mkpath(self.egg_info)
17 self.copy_file("LICENSE", self.egg_info)
18
19 egg_info.run(self)
20
21
22 here = path.abspath(path.dirname(__file__))
23
24 # Get the long description from the README file
25 with open(path.join(here, "README.md"), encoding="utf-8") as f:
26 long_description = f.read()
27
28 # Get version number and store it in __version__
29 exec(open("cleanlab/version.py").read())
30
31 DATALAB_REQUIRE = [
32 # Mainly for Datalab's data storage class.
33 # Still some type hints that require datasets
34 "datasets>=2.7.0",
35 ]
36
37 IMAGE_REQUIRE = DATALAB_REQUIRE + ["cleanvision>=0.3.2"]
38
39 EXTRAS_REQUIRE = {
40 "datalab": DATALAB_REQUIRE,
41 "image": IMAGE_REQUIRE,
42 "all": ["matplotlib>=3.5.1"],
43 }
44 EXTRAS_REQUIRE["all"] = list(set(sum(EXTRAS_REQUIRE.values(), [])))
45
46 setup(
47 name="cleanlab",
48 version=__version__,
49 license="AGPLv3+",
50 long_description=long_description,
51 long_description_content_type="text/markdown",
52 description="The standard package for data-centric AI, machine learning with label errors, "
53 "and automatically finding and fixing dataset issues in Python.",
54 url="https://cleanlab.ai",
55 project_urls={
56 "Documentation": "https://docs.cleanlab.ai",
57 "Bug Tracker": "https://github.com/cleanlab/cleanlab/issues",
58 "Source Code": "https://github.com/cleanlab/cleanlab",
59 },
60 author="Cleanlab Inc.",
61 author_email="[email protected]",
62 # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
63 classifiers=[
64 "Development Status :: 4 - Beta",
65 "Intended Audience :: Developers",
66 "Intended Audience :: Education",
67 "Intended Audience :: Science/Research",
68 "Intended Audience :: Information Technology",
69 "License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)",
70 "Natural Language :: English",
71 # We believe this package works will these versions, but we do not guarantee it!
72 "Programming Language :: Python :: 3",
73 "Programming Language :: Python :: 3.7",
74 "Programming Language :: Python :: 3.8",
75 "Programming Language :: Python :: 3.9",
76 "Programming Language :: Python :: 3.10",
77 "Programming Language :: Python",
78 "Topic :: Software Development",
79 "Topic :: Scientific/Engineering",
80 "Topic :: Scientific/Engineering :: Mathematics",
81 "Topic :: Scientific/Engineering :: Artificial Intelligence",
82 "Topic :: Software Development :: Libraries",
83 "Topic :: Software Development :: Libraries :: Python Modules",
84 ],
85 python_requires=">=3.7",
86 # What does your project relate to?
87 keywords="machine_learning data_cleaning confident_learning classification weak_supervision "
88 "learning_with_noisy_labels unsupervised_learning datacentric_ai, datacentric",
89 # You can just specify the packages manually here if your project is
90 # simple. Or you can use find_packages().
91 packages=find_packages(exclude=[]),
92 # Include cleanlab license file.
93 include_package_data=True,
94 package_data={
95 "": ["LICENSE"],
96 },
97 license_files=("LICENSE",),
98 cmdclass={"egg_info": egg_info_ex},
99 # List run-time dependencies here. These will be installed by pip when
100 # your project is installed. For an analysis of "install_requires" vs pip's
101 # requirements files see:
102 # https://packaging.python.org/en/latest/discussions/install-requires-vs-requirements/
103 install_requires=[
104 "numpy>=1.20.0",
105 "scikit-learn>=1.0",
106 "tqdm>=4.53.0",
107 "pandas>=1.1.5",
108 "termcolor>=2.0.0,<2.4.0",
109 ],
110 extras_require=EXTRAS_REQUIRE,
111 )
112
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -70,10 +70,10 @@
"Natural Language :: English",
# We believe this package works will these versions, but we do not guarantee it!
"Programming Language :: Python :: 3",
- "Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
+ "Programming Language :: Python :: 3.11",
"Programming Language :: Python",
"Topic :: Software Development",
"Topic :: Scientific/Engineering",
@@ -82,7 +82,7 @@
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
],
- python_requires=">=3.7",
+ python_requires=">=3.8",
# What does your project relate to?
keywords="machine_learning data_cleaning confident_learning classification weak_supervision "
"learning_with_noisy_labels unsupervised_learning datacentric_ai, datacentric",
@@ -101,11 +101,11 @@
# requirements files see:
# https://packaging.python.org/en/latest/discussions/install-requires-vs-requirements/
install_requires=[
- "numpy>=1.20.0",
- "scikit-learn>=1.0",
+ "numpy>=1.22.0",
+ "scikit-learn>=1.1",
"tqdm>=4.53.0",
- "pandas>=1.1.5",
- "termcolor>=2.0.0,<2.4.0",
+ "pandas>=1.4.0",
+ "termcolor>=2.4.0",
],
extras_require=EXTRAS_REQUIRE,
)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -70,10 +70,10 @@\n \"Natural Language :: English\",\n # We believe this package works will these versions, but we do not guarantee it!\n \"Programming Language :: Python :: 3\",\n- \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n+ \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python\",\n \"Topic :: Software Development\",\n \"Topic :: Scientific/Engineering\",\n@@ -82,7 +82,7 @@\n \"Topic :: Software Development :: Libraries\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n- python_requires=\">=3.7\",\n+ python_requires=\">=3.8\",\n # What does your project relate to?\n keywords=\"machine_learning data_cleaning confident_learning classification weak_supervision \"\n \"learning_with_noisy_labels unsupervised_learning datacentric_ai, datacentric\",\n@@ -101,11 +101,11 @@\n # requirements files see:\n # https://packaging.python.org/en/latest/discussions/install-requires-vs-requirements/\n install_requires=[\n- \"numpy>=1.20.0\",\n- \"scikit-learn>=1.0\",\n+ \"numpy>=1.22.0\",\n+ \"scikit-learn>=1.1\",\n \"tqdm>=4.53.0\",\n- \"pandas>=1.1.5\",\n- \"termcolor>=2.0.0,<2.4.0\",\n+ \"pandas>=1.4.0\",\n+ \"termcolor>=2.4.0\",\n ],\n extras_require=EXTRAS_REQUIRE,\n )\n", "issue": "Revert version upper bound of termcolor dependency\n## Background\r\n\r\nIn #905 , an upper bound to the version of termcolor was added. This was a temporary fix to pass CI on existing PRs.\r\n\r\nhttps://github.com/cleanlab/cleanlab/blob/4ac43c0a2654179732cafc704e807c3c5c9ec1f1/setup.py#L108\r\n\r\nWithout the upper bound, the following tests fail on Python 3.8+\r\nhttps://github.com/cleanlab/cleanlab/blob/4ac43c0a2654179732cafc704e807c3c5c9ec1f1/tests/test_token_classification.py#L134-L169\r\n\r\nHere's a screenshot of a CI run with these tests failing:\r\n\r\n<img width=\"1142\" alt=\"image\" src=\"https://github.com/cleanlab/cleanlab/assets/18127060/2d371cf4-0d73-49e7-8392-df3384c46a71\">\r\n\r\nIt looks like the `colored`\u00a0 variable no longer has the\r\n\r\n## Task\r\n\r\n\r\nThe version limits need to be reverted to\r\n\r\n```python\r\n \"termcolor>=2.0.0\", \r\n```\r\n\r\nbut the affected unit tests need to pass as well when the new release of termcolor ([version 2.4.0](https://github.com/termcolor/termcolor/releases/tag/2.4.0)) is installed.\r\n\r\n\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "from setuptools import setup, find_packages\nfrom setuptools.command.egg_info import egg_info\n\n# To use a consistent encoding\nfrom codecs import open\nfrom os import path\n\n\nclass egg_info_ex(egg_info):\n \"\"\"Includes license file into `.egg-info` folder.\"\"\"\n\n def run(self):\n # don't duplicate license into `.egg-info` when building a distribution\n if not self.distribution.have_run.get(\"install\", True):\n # `install` command is in progress, copy license\n self.mkpath(self.egg_info)\n self.copy_file(\"LICENSE\", self.egg_info)\n\n egg_info.run(self)\n\n\nhere = path.abspath(path.dirname(__file__))\n\n# Get the long description from the README file\nwith open(path.join(here, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\n# Get version number and store it in __version__\nexec(open(\"cleanlab/version.py\").read())\n\nDATALAB_REQUIRE = [\n # Mainly for Datalab's data storage class.\n # Still some type 
hints that require datasets\n \"datasets>=2.7.0\",\n]\n\nIMAGE_REQUIRE = DATALAB_REQUIRE + [\"cleanvision>=0.3.2\"]\n\nEXTRAS_REQUIRE = {\n \"datalab\": DATALAB_REQUIRE,\n \"image\": IMAGE_REQUIRE,\n \"all\": [\"matplotlib>=3.5.1\"],\n}\nEXTRAS_REQUIRE[\"all\"] = list(set(sum(EXTRAS_REQUIRE.values(), [])))\n\nsetup(\n name=\"cleanlab\",\n version=__version__,\n license=\"AGPLv3+\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n description=\"The standard package for data-centric AI, machine learning with label errors, \"\n \"and automatically finding and fixing dataset issues in Python.\",\n url=\"https://cleanlab.ai\",\n project_urls={\n \"Documentation\": \"https://docs.cleanlab.ai\",\n \"Bug Tracker\": \"https://github.com/cleanlab/cleanlab/issues\",\n \"Source Code\": \"https://github.com/cleanlab/cleanlab\",\n },\n author=\"Cleanlab Inc.\",\n author_email=\"[email protected]\",\n # See https://pypi.python.org/pypi?%3Aaction=list_classifiers\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Science/Research\",\n \"Intended Audience :: Information Technology\",\n \"License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)\",\n \"Natural Language :: English\",\n # We believe this package works will these versions, but we do not guarantee it!\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python\",\n \"Topic :: Software Development\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Software Development :: Libraries\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n python_requires=\">=3.7\",\n # What does your project relate to?\n keywords=\"machine_learning data_cleaning confident_learning classification weak_supervision \"\n \"learning_with_noisy_labels unsupervised_learning datacentric_ai, datacentric\",\n # You can just specify the packages manually here if your project is\n # simple. Or you can use find_packages().\n packages=find_packages(exclude=[]),\n # Include cleanlab license file.\n include_package_data=True,\n package_data={\n \"\": [\"LICENSE\"],\n },\n license_files=(\"LICENSE\",),\n cmdclass={\"egg_info\": egg_info_ex},\n # List run-time dependencies here. These will be installed by pip when\n # your project is installed. For an analysis of \"install_requires\" vs pip's\n # requirements files see:\n # https://packaging.python.org/en/latest/discussions/install-requires-vs-requirements/\n install_requires=[\n \"numpy>=1.20.0\",\n \"scikit-learn>=1.0\",\n \"tqdm>=4.53.0\",\n \"pandas>=1.1.5\",\n \"termcolor>=2.0.0,<2.4.0\",\n ],\n extras_require=EXTRAS_REQUIRE,\n)\n", "path": "setup.py"}]} | 2,099 | 427 |
gh_patches_debug_646 | rasdani/github-patches | git_diff | pex-tool__pex-2034 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.120
On the docket:
+ [x] Support REPL command history #2019
+ [x] Using --complete-platform with --resolve-local-platforms should build sdists when local platform provides a subset of complete-platforms #2026
+ [x] A loose layout, venv-with-symlink PEX creates brittle symlinks #2023
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.119"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.119"
+__version__ = "2.1.120"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.119\"\n+__version__ = \"2.1.120\"\n", "issue": "Release 2.1.120\nOn the docket:\r\n+ [x] Support REPL command history #2019 \r\n+ [x] Using --complete-platform with --resolve-local-platforms should build sdists when local platform provides a subset of complete-platforms #2026\r\n+ [x] A loose layout, venv-with-symlink PEX creates brittle symlinks #2023\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.119\"\n", "path": "pex/version.py"}]} | 673 | 98 |
gh_patches_debug_5626 | rasdani/github-patches | git_diff | modin-project__modin-4553 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
TEST: "build docs" CI action is broken with SphinxWarning: Invalid configuration value found: 'language = None'.
### System information
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: macOS Monterey
- **Modin version** (`modin.__version__`): 0477c688972bee89250d75835ff543db27f2e0d5
- **Python version**: 3.9.12
- **Code we can use to reproduce**:
`cd docs && sphinx-build -T -E -W -b html . build`
### Describe the problem
Building the docs fails with SphinxWarning: Invalid configuration value found: 'language = None'. The build fails with the latest Sphinx version, 5.0.1, as well as the previous version, 5.0.0.
### Source code / logs
<details>
<summary>Stack trace </summary>
```
Running Sphinx v5.0.0
Traceback (most recent call last):
File "/Users/maheshvashishtha/opt/anaconda3/envs/modin-latest/lib/python3.9/site-packages/sphinx/cmd/build.py", line 272, in build_main
app = Sphinx(args.sourcedir, args.confdir, args.outputdir,
File "/Users/maheshvashishtha/opt/anaconda3/envs/modin-latest/lib/python3.9/site-packages/sphinx/application.py", line 202, in __init__
self.config = Config.read(self.confdir, confoverrides or {}, self.tags)
File "/Users/maheshvashishtha/opt/anaconda3/envs/modin-latest/lib/python3.9/site-packages/sphinx/config.py", line 172, in read
logger.warning(__("Invalid configuration value found: 'language = None'. "
File "/Users/maheshvashishtha/opt/anaconda3/envs/modin-latest/lib/python3.9/logging/__init__.py", line 1812, in warning
self.log(WARNING, msg, *args, **kwargs)
File "/Users/maheshvashishtha/opt/anaconda3/envs/modin-latest/lib/python3.9/site-packages/sphinx/util/logging.py", line 122, in log
super().log(level, msg, *args, **kwargs)
File "/Users/maheshvashishtha/opt/anaconda3/envs/modin-latest/lib/python3.9/logging/__init__.py", line 1844, in log
self.logger.log(level, msg, *args, **kwargs)
File "/Users/maheshvashishtha/opt/anaconda3/envs/modin-latest/lib/python3.9/logging/__init__.py", line 1512, in log
self._log(level, msg, args, **kwargs)
File "/Users/maheshvashishtha/opt/anaconda3/envs/modin-latest/lib/python3.9/logging/__init__.py", line 1589, in _log
self.handle(record)
File "/Users/maheshvashishtha/opt/anaconda3/envs/modin-latest/lib/python3.9/logging/__init__.py", line 1599, in handle
self.callHandlers(record)
File "/Users/maheshvashishtha/opt/anaconda3/envs/modin-latest/lib/python3.9/logging/__init__.py", line 1661, in callHandlers
hdlr.handle(record)
File "/Users/maheshvashishtha/opt/anaconda3/envs/modin-latest/lib/python3.9/logging/__init__.py", line 948, in handle
rv = self.filter(record)
File "/Users/maheshvashishtha/opt/anaconda3/envs/modin-latest/lib/python3.9/logging/__init__.py", line 806, in filter
result = f.filter(record)
File "/Users/maheshvashishtha/opt/anaconda3/envs/modin-latest/lib/python3.9/site-packages/sphinx/util/logging.py", line 425, in filter
raise exc
sphinx.errors.SphinxWarning: Invalid configuration value found: 'language = None'. Update your configuration to a valid langauge code. Falling back to 'en' (English).
Warning, treated as error:
Invalid configuration value found: 'language = None'. Update your configuration to a valid langauge code. Falling back to 'en' (English).
```
</details>
</issue>
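Editor's note: Sphinx 5.0 no longer accepts `None` for the `language` configuration value — it must be a language-code string — and because the build runs with `-W`, the warning is promoted to an error. A minimal sketch of the `conf.py` change (illustrative; the exact value the project settles on may differ):

```python
# docs/conf.py

# Rejected by Sphinx >= 5.0 ("Invalid configuration value found: 'language = None'"):
# language = None

# Accepted: an explicit language code.
language = "en"
```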
<code>
[start of docs/conf.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Configuration file for the Sphinx documentation builder.
4 #
5 # This file does only contain a selection of the most common options. For a
6 # full list see the documentation:
7 # http://www.sphinx-doc.org/en/stable/config
8
9 # -- Project information -----------------------------------------------------
10 import sys
11 import os
12 import types
13
14 import ray
15
16 # stub ray.remote to be a no-op so it doesn't shadow docstrings
17 def noop_decorator(*args, **kwargs):
18 if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
19 # This is the case where the decorator is just @ray.remote without parameters.
20 return args[0]
21 return lambda cls_or_func: cls_or_func
22
23
24 ray.remote = noop_decorator
25
26 # fake modules if they're missing
27 for mod_name in ("cudf", "cupy", "pyarrow.gandiva", "omniscidbe"):
28 try:
29 __import__(mod_name)
30 except ImportError:
31 sys.modules[mod_name] = types.ModuleType(
32 mod_name, f"fake {mod_name} for building docs"
33 )
34 if not hasattr(sys.modules["cudf"], "DataFrame"):
35 sys.modules["cudf"].DataFrame = type("DataFrame", (object,), {})
36 if not hasattr(sys.modules["cupy"], "ndarray"):
37 sys.modules["cupy"].ndarray = type("ndarray", (object,), {})
38 if not hasattr(sys.modules["omniscidbe"], "PyDbEngine"):
39 sys.modules["omniscidbe"].PyDbEngine = type("PyDbEngine", (object,), {})
40
41 sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
42 import modin
43
44 from modin.config.__main__ import export_config_help
45
46 configs_file_path = os.path.abspath(
47 os.path.join(os.path.dirname(__file__), "flow/modin/configs_help.csv")
48 )
49 # Export configs help to create configs table in the docs/flow/modin/config.rst
50 export_config_help(configs_file_path)
51
52 project = "Modin"
53 copyright = "2018-2022, Modin Developers."
54 author = "Modin contributors"
55
56 # The short X.Y version
57 version = "{}".format(modin.__version__)
58 # The full version, including alpha/beta/rc tags
59 release = version
60
61
62 # -- General configuration ---------------------------------------------------
63
64 # If your documentation needs a minimal Sphinx version, state it here.
65 #
66 # needs_sphinx = '1.0'
67
68 # Add any Sphinx extension module names here, as strings. They can be
69 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
70 # ones.
71 extensions = [
72 "sphinx.ext.autodoc",
73 "sphinx.ext.napoleon",
74 "sphinx.ext.intersphinx",
75 "sphinx.ext.todo",
76 "sphinx.ext.mathjax",
77 "sphinx.ext.githubpages",
78 "sphinx.ext.graphviz",
79 "sphinxcontrib.plantuml",
80 "sphinx_issues",
81 ]
82
83
84 # Add any paths that contain templates here, relative to this directory.
85 templates_path = ["_templates"]
86
87 # The suffix(es) of source filenames.
88 # You can specify multiple suffix as a list of string:
89 #
90 # source_suffix = ['.rst', '.md']
91 source_suffix = ".rst"
92
93 # The master toctree document.
94 master_doc = "index"
95
96 # The language for content autogenerated by Sphinx. Refer to documentation
97 # for a list of supported languages.
98 #
99 # This is also used if you do content translation via gettext catalogs.
100 # Usually you set "language" from the command line for these cases.
101 language = None
102
103 # List of patterns, relative to source directory, that match files and
104 # directories to ignore when looking for source files.
105 # This pattern also affects html_static_path and html_extra_path .
106 exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
107
108 # The name of the Pygments (syntax highlighting) style to use.
109 pygments_style = "sphinx"
110
111
112 # -- Options for HTML output -------------------------------------------------
113
114 # Maps git branches to Sphinx themes
115 default_html_theme = "pydata_sphinx_theme"
116 current_branch = "nature"
117
118 # The theme to use for HTML and HTML Help pages. See the documentation for
119 # a list of builtin themes.
120 #
121 html_theme = "pydata_sphinx_theme"
122
123 html_favicon = "img/MODIN_ver2.ico"
124
125 html_logo = "img/MODIN_ver2.png"
126
127 # Theme options are theme-specific and customize the look and feel of a theme
128 # further. For a list of options available for each theme, see the
129 # documentation.
130 #
131 html_theme_options = {
132 "sidebarwidth": 270,
133 "collapse_navigation": False,
134 "navigation_depth": 4,
135 "show_toc_level": 2,
136 "github_url": "https://github.com/modin-project/modin",
137 "icon_links": [
138 {
139 "name": "PyPI",
140 "url": "https://pypi.org/project/modin",
141 "icon": "fab fa-python",
142 },
143 {
144 "name": "conda-forge",
145 "url": "https://anaconda.org/conda-forge/modin",
146 "icon": "fas fa-circle-notch",
147 },
148 {
149 "name": "Join the Slack",
150 "url": "https://modin.org/slack.html",
151 "icon": "fab fa-slack",
152 },
153 {
154 "name": "Discourse",
155 "url": "https://discuss.modin.org/",
156 "icon": "fab fa-discourse",
157 },
158 {
159 "name": "Mailing List",
160 "url": "https://groups.google.com/forum/#!forum/modin-dev",
161 "icon": "fas fa-envelope-square",
162 },
163 ],
164 }
165
166 # Custom sidebar templates, must be a dictionary that maps document names
167 # to template names.
168 #
169 # The default sidebars (for documents that don't match any pattern) are
170 # defined by theme itself. Builtin themes are using these templates by
171 # default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
172 # 'searchbox.html']``.
173 #
174 # The default pydata_sphinx_theme sidebar templates are
175 # sidebar-nav-bs.html and search-field.html.
176 html_sidebars = {}
177
178 issues_github_path = "modin-project/modin"
179
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -98,7 +98,7 @@
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
-language = None
+language = "en"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -98,7 +98,7 @@\n #\n # This is also used if you do content translation via gettext catalogs.\n # Usually you set \"language\" from the command line for these cases.\n-language = None\n+language = \"en\"\n \n # List of patterns, relative to source directory, that match files and\n # directories to ignore when looking for source files.\n", "issue": "TEST: \"build docs\" CI action is broken with SphinxWarning: Invalid configuration value found: 'language = None'.\n### System information\r\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: macOS Monterey\r\n- **Modin version** (`modin.__version__`): 0477c688972bee89250d75835ff543db27f2e0d5\r\n- **Python version**: 3.9.12\r\n- **Code we can use to reproduce**:\r\n`cd docs && sphinx-build -T -E -W -b html . build`\r\n\r\n\r\n\r\n### Describe the problem\r\nBuilding docs fails with SphinxWarning: Invalid configuration value found: 'language = None'. Fails with latest Sphinx version, 5.0.1, as well as previous version, 5.0.0.\r\n\r\n### Source code / logs\r\n\r\n<details>\r\n<summary>Stack trace </summary>\r\n\r\n```\r\nRunning Sphinx v5.0.0\r\n\r\nTraceback (most recent call last):\r\n File \"/Users/maheshvashishtha/opt/anaconda3/envs/modin-latest/lib/python3.9/site-packages/sphinx/cmd/build.py\", line 272, in build_main\r\n app = Sphinx(args.sourcedir, args.confdir, args.outputdir,\r\n File \"/Users/maheshvashishtha/opt/anaconda3/envs/modin-latest/lib/python3.9/site-packages/sphinx/application.py\", line 202, in __init__\r\n self.config = Config.read(self.confdir, confoverrides or {}, self.tags)\r\n File \"/Users/maheshvashishtha/opt/anaconda3/envs/modin-latest/lib/python3.9/site-packages/sphinx/config.py\", line 172, in read\r\n logger.warning(__(\"Invalid configuration value found: 'language = None'. 
\"\r\n File \"/Users/maheshvashishtha/opt/anaconda3/envs/modin-latest/lib/python3.9/logging/__init__.py\", line 1812, in warning\r\n self.log(WARNING, msg, *args, **kwargs)\r\n File \"/Users/maheshvashishtha/opt/anaconda3/envs/modin-latest/lib/python3.9/site-packages/sphinx/util/logging.py\", line 122, in log\r\n super().log(level, msg, *args, **kwargs)\r\n File \"/Users/maheshvashishtha/opt/anaconda3/envs/modin-latest/lib/python3.9/logging/__init__.py\", line 1844, in log\r\n self.logger.log(level, msg, *args, **kwargs)\r\n File \"/Users/maheshvashishtha/opt/anaconda3/envs/modin-latest/lib/python3.9/logging/__init__.py\", line 1512, in log\r\n self._log(level, msg, args, **kwargs)\r\n File \"/Users/maheshvashishtha/opt/anaconda3/envs/modin-latest/lib/python3.9/logging/__init__.py\", line 1589, in _log\r\n self.handle(record)\r\n File \"/Users/maheshvashishtha/opt/anaconda3/envs/modin-latest/lib/python3.9/logging/__init__.py\", line 1599, in handle\r\n self.callHandlers(record)\r\n File \"/Users/maheshvashishtha/opt/anaconda3/envs/modin-latest/lib/python3.9/logging/__init__.py\", line 1661, in callHandlers\r\n hdlr.handle(record)\r\n File \"/Users/maheshvashishtha/opt/anaconda3/envs/modin-latest/lib/python3.9/logging/__init__.py\", line 948, in handle\r\n rv = self.filter(record)\r\n File \"/Users/maheshvashishtha/opt/anaconda3/envs/modin-latest/lib/python3.9/logging/__init__.py\", line 806, in filter\r\n result = f.filter(record)\r\n File \"/Users/maheshvashishtha/opt/anaconda3/envs/modin-latest/lib/python3.9/site-packages/sphinx/util/logging.py\", line 425, in filter\r\n raise exc\r\nsphinx.errors.SphinxWarning: Invalid configuration value found: 'language = None'. Update your configuration to a valid langauge code. Falling back to 'en' (English).\r\n\r\nWarning, treated as error:\r\nInvalid configuration value found: 'language = None'. Update your configuration to a valid langauge code. Falling back to 'en' (English).\r\n```\r\n\r\n</details>\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. 
For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/stable/config\n\n# -- Project information -----------------------------------------------------\nimport sys\nimport os\nimport types\n\nimport ray\n\n# stub ray.remote to be a no-op so it doesn't shadow docstrings\ndef noop_decorator(*args, **kwargs):\n if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):\n # This is the case where the decorator is just @ray.remote without parameters.\n return args[0]\n return lambda cls_or_func: cls_or_func\n\n\nray.remote = noop_decorator\n\n# fake modules if they're missing\nfor mod_name in (\"cudf\", \"cupy\", \"pyarrow.gandiva\", \"omniscidbe\"):\n try:\n __import__(mod_name)\n except ImportError:\n sys.modules[mod_name] = types.ModuleType(\n mod_name, f\"fake {mod_name} for building docs\"\n )\nif not hasattr(sys.modules[\"cudf\"], \"DataFrame\"):\n sys.modules[\"cudf\"].DataFrame = type(\"DataFrame\", (object,), {})\nif not hasattr(sys.modules[\"cupy\"], \"ndarray\"):\n sys.modules[\"cupy\"].ndarray = type(\"ndarray\", (object,), {})\nif not hasattr(sys.modules[\"omniscidbe\"], \"PyDbEngine\"):\n sys.modules[\"omniscidbe\"].PyDbEngine = type(\"PyDbEngine\", (object,), {})\n\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\")))\nimport modin\n\nfrom modin.config.__main__ import export_config_help\n\nconfigs_file_path = os.path.abspath(\n os.path.join(os.path.dirname(__file__), \"flow/modin/configs_help.csv\")\n)\n# Export configs help to create configs table in the docs/flow/modin/config.rst\nexport_config_help(configs_file_path)\n\nproject = \"Modin\"\ncopyright = \"2018-2022, Modin Developers.\"\nauthor = \"Modin contributors\"\n\n# The short X.Y version\nversion = \"{}\".format(modin.__version__)\n# The full version, including alpha/beta/rc tags\nrelease = version\n\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.mathjax\",\n \"sphinx.ext.githubpages\",\n \"sphinx.ext.graphviz\",\n \"sphinxcontrib.plantuml\",\n \"sphinx_issues\",\n]\n\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = \".rst\"\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# Maps git branches to Sphinx themes\ndefault_html_theme = \"pydata_sphinx_theme\"\ncurrent_branch = \"nature\"\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"pydata_sphinx_theme\"\n\nhtml_favicon = \"img/MODIN_ver2.ico\"\n\nhtml_logo = \"img/MODIN_ver2.png\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {\n \"sidebarwidth\": 270,\n \"collapse_navigation\": False,\n \"navigation_depth\": 4,\n \"show_toc_level\": 2,\n \"github_url\": \"https://github.com/modin-project/modin\",\n \"icon_links\": [\n {\n \"name\": \"PyPI\",\n \"url\": \"https://pypi.org/project/modin\",\n \"icon\": \"fab fa-python\",\n },\n {\n \"name\": \"conda-forge\",\n \"url\": \"https://anaconda.org/conda-forge/modin\",\n \"icon\": \"fas fa-circle-notch\",\n },\n {\n \"name\": \"Join the Slack\",\n \"url\": \"https://modin.org/slack.html\",\n \"icon\": \"fab fa-slack\",\n },\n {\n \"name\": \"Discourse\",\n \"url\": \"https://discuss.modin.org/\",\n \"icon\": \"fab fa-discourse\",\n },\n {\n \"name\": \"Mailing List\",\n \"url\": \"https://groups.google.com/forum/#!forum/modin-dev\",\n \"icon\": \"fas fa-envelope-square\",\n },\n ],\n}\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\n# The default pydata_sphinx_theme sidebar templates are\n# sidebar-nav-bs.html and search-field.html.\nhtml_sidebars = {}\n\nissues_github_path = \"modin-project/modin\"\n", "path": "docs/conf.py"}]} | 3,360 | 102 |
gh_patches_debug_12495 | rasdani/github-patches | git_diff | geopandas__geopandas-478 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
BUG: coordinate indexer does not correctly handle slice start/stop of 0
When slicing a GeoSeries using the coordinate indexer, a slice start/stop of 0 is ignored.
```
>>> gs = gpd.GeoSeries([Point(x, x) for x in range(-3, 4)])
>>> print(gs)
0 POINT (-3 -3)
1 POINT (-2 -2)
2 POINT (-1 -1)
3 POINT (0 0)
4 POINT (1 1)
5 POINT (2 2)
6 POINT (3 3)
dtype: object
```
When attempting to select points located with x/y <= 0, you get:
```
>>> print(gs.cx[:0, :0])
0 POINT (-3 -3)
1 POINT (-2 -2)
2 POINT (-1 -1)
3 POINT (0 0)
4 POINT (1 1)
5 POINT (2 2)
6 POINT (3 3)
dtype: object
```
whereas the expected behavior is:
```
>>> print(gs.cx[:0, :0])
0 POINT (-3 -3)
1 POINT (-2 -2)
2 POINT (-1 -1)
3 POINT (0 0)
```
</issue>
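Editor's note: the behaviour traces back to the truthiness fallback used when `_CoordinateIndexer._getitem_tuple` builds the bounding box (see the code below): `xs.start or xmin` replaces any falsy slice bound with the dataset bound, and `0` is falsy in Python. A minimal sketch of the difference — the variable names are illustrative:

```python
xmin = -3.0
start = 0  # slice bound supplied by the user

# Truthiness fallback: 0 is silently swapped for the dataset minimum.
lower = start or xmin                           # -> -3.0

# Explicit None check: 0 survives as a valid bound.
lower = start if start is not None else xmin    # -> 0
```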
<code>
[start of geopandas/geoseries.py]
1 from functools import partial
2 import json
3 from warnings import warn
4
5 import numpy as np
6 from pandas import Series, DataFrame
7 from pandas.core.indexing import _NDFrameIndexer
8
9 import pyproj
10 from shapely.geometry import box, shape, Polygon, Point
11 from shapely.geometry.collection import GeometryCollection
12 from shapely.geometry.base import BaseGeometry
13 from shapely.ops import transform
14
15 from geopandas.plotting import plot_series
16 from geopandas.base import GeoPandasBase, _series_unary_op
17
18
19 def _is_empty(x):
20 try:
21 return x.is_empty
22 except:
23 return False
24
25
26 class _CoordinateIndexer(_NDFrameIndexer):
27 """ Indexing by coordinate slices """
28 def _getitem_tuple(self, tup):
29 obj = self.obj
30 xs, ys = tup
31 # handle numeric values as x and/or y coordinate index
32 if type(xs) is not slice:
33 xs = slice(xs, xs)
34 if type(ys) is not slice:
35 ys = slice(ys, ys)
36 # don't know how to handle step; should this raise?
37 if xs.step is not None or ys.step is not None:
38 warn("Ignoring step - full interval is used.")
39 xmin, ymin, xmax, ymax = obj.total_bounds
40 bbox = box(xs.start or xmin,
41 ys.start or ymin,
42 xs.stop or xmax,
43 ys.stop or ymax)
44 idx = obj.intersects(bbox)
45 return obj[idx]
46
47
48 class GeoSeries(GeoPandasBase, Series):
49 """A Series object designed to store shapely geometry objects."""
50 _metadata = ['name', 'crs']
51
52 def __new__(cls, *args, **kwargs):
53 kwargs.pop('crs', None)
54 arr = Series.__new__(cls)
55 if type(arr) is GeoSeries:
56 return arr
57 else:
58 return arr.view(GeoSeries)
59
60 def __init__(self, *args, **kwargs):
61 # fix problem for scalar geometries passed
62 if len(args) == 1 and isinstance(args[0], BaseGeometry):
63 args = ([args[0]],)
64
65 crs = kwargs.pop('crs', None)
66
67 super(GeoSeries, self).__init__(*args, **kwargs)
68 self.crs = crs
69 self._invalidate_sindex()
70
71 def append(self, *args, **kwargs):
72 return self._wrapped_pandas_method('append', *args, **kwargs)
73
74 @property
75 def geometry(self):
76 return self
77
78 @property
79 def x(self):
80 """Return the x location of point geometries in a GeoSeries"""
81 if (self.geom_type == "Point").all():
82 return _series_unary_op(self, 'x', null_value=np.nan)
83 else:
84 message = "x attribute access only provided for Point geometries"
85 raise ValueError(message)
86
87 @property
88 def y(self):
89 """Return the y location of point geometries in a GeoSeries"""
90 if (self.geom_type == "Point").all():
91 return _series_unary_op(self, 'y', null_value=np.nan)
92 else:
93 message = "y attribute access only provided for Point geometries"
94 raise ValueError(message)
95
96 @classmethod
97 def from_file(cls, filename, **kwargs):
98 """
99 Alternate constructor to create a GeoSeries from a file
100
101 Parameters
102 ----------
103
104 filename : str
105 File path or file handle to read from. Depending on which kwargs
106 are included, the content of filename may vary, see:
107 http://toblerity.github.io/fiona/README.html#usage
108 for usage details.
109 kwargs : key-word arguments
110 These arguments are passed to fiona.open, and can be used to
111 access multi-layer data, data stored within archives (zip files),
112 etc.
113
114 """
115 import fiona
116 geoms = []
117 with fiona.open(filename, **kwargs) as f:
118 crs = f.crs
119 for rec in f:
120 geoms.append(shape(rec['geometry']))
121 g = GeoSeries(geoms)
122 g.crs = crs
123 return g
124
125 @property
126 def __geo_interface__(self):
127 """Returns a GeoSeries as a python feature collection
128 """
129 from geopandas import GeoDataFrame
130 return GeoDataFrame({'geometry': self}).__geo_interface__
131
132 def to_file(self, filename, driver="ESRI Shapefile", **kwargs):
133 from geopandas import GeoDataFrame
134 data = GeoDataFrame({"geometry": self,
135 "id":self.index.values},
136 index=self.index)
137 data.crs = self.crs
138 data.to_file(filename, driver, **kwargs)
139
140 #
141 # Implement pandas methods
142 #
143
144 @property
145 def _constructor(self):
146 return GeoSeries
147
148 def _wrapped_pandas_method(self, mtd, *args, **kwargs):
149 """Wrap a generic pandas method to ensure it returns a GeoSeries"""
150 val = getattr(super(GeoSeries, self), mtd)(*args, **kwargs)
151 if type(val) == Series:
152 val.__class__ = GeoSeries
153 val.crs = self.crs
154 val._invalidate_sindex()
155 return val
156
157 def __getitem__(self, key):
158 return self._wrapped_pandas_method('__getitem__', key)
159
160 def sort_index(self, *args, **kwargs):
161 return self._wrapped_pandas_method('sort_index', *args, **kwargs)
162
163 def take(self, *args, **kwargs):
164 return self._wrapped_pandas_method('take', *args, **kwargs)
165
166 def select(self, *args, **kwargs):
167 return self._wrapped_pandas_method('select', *args, **kwargs)
168
169 @property
170 def _can_hold_na(self):
171 return False
172
173 def __finalize__(self, other, method=None, **kwargs):
174 """ propagate metadata from other to self """
175 # NOTE: backported from pandas master (upcoming v0.13)
176 for name in self._metadata:
177 object.__setattr__(self, name, getattr(other, name, None))
178 return self
179
180 def copy(self, order='C'):
181 """
182 Make a copy of this GeoSeries object
183
184 Parameters
185 ----------
186 deep : boolean, default True
187 Make a deep copy, i.e. also copy data
188
189 Returns
190 -------
191 copy : GeoSeries
192 """
193 # FIXME: this will likely be unnecessary in pandas >= 0.13
194 return GeoSeries(self.values.copy(order), index=self.index,
195 name=self.name).__finalize__(self)
196
197 def isnull(self):
198 """Null values in a GeoSeries are represented by empty geometric objects"""
199 non_geo_null = super(GeoSeries, self).isnull()
200 val = self.apply(_is_empty)
201 return np.logical_or(non_geo_null, val)
202
203 def fillna(self, value=None, method=None, inplace=False,
204 **kwargs):
205 """Fill NA/NaN values with a geometry (empty polygon by default).
206
207 "method" is currently not implemented for pandas <= 0.12.
208 """
209 if value is None:
210 value = Point()
211 return super(GeoSeries, self).fillna(value=value, method=method,
212 inplace=inplace, **kwargs)
213
214 def align(self, other, join='outer', level=None, copy=True,
215 fill_value=None, **kwargs):
216 if fill_value is None:
217 fill_value = Point()
218 left, right = super(GeoSeries, self).align(other, join=join,
219 level=level, copy=copy,
220 fill_value=fill_value,
221 **kwargs)
222 if isinstance(other, GeoSeries):
223 return GeoSeries(left), GeoSeries(right)
224 else: # It is probably a Series, let's keep it that way
225 return GeoSeries(left), right
226
227
228 def __contains__(self, other):
229 """Allow tests of the form "geom in s"
230
231 Tests whether a GeoSeries contains a geometry.
232
233 Note: This is not the same as the geometric method "contains".
234 """
235 if isinstance(other, BaseGeometry):
236 return np.any(self.geom_equals(other))
237 else:
238 return False
239
240 def plot(self, *args, **kwargs):
241 return plot_series(self, *args, **kwargs)
242
243 plot.__doc__ = plot_series.__doc__
244
245 #
246 # Additional methods
247 #
248
249 def to_crs(self, crs=None, epsg=None):
250 """Transform geometries to a new coordinate reference system
251
252 This method will transform all points in all objects. It has
253 no notion or projecting entire geometries. All segments
254 joining points are assumed to be lines in the current
255 projection, not geodesics. Objects crossing the dateline (or
256 other projection boundary) will have undesirable behavior.
257
258 `to_crs` passes the `crs` argument to the `Proj` function from the
259 `pyproj` library (with the option `preserve_units=True`). It can
260 therefore accept proj4 projections in any format
261 supported by `Proj`, including dictionaries, or proj4 strings.
262
263 """
264 from fiona.crs import from_epsg
265 if self.crs is None:
266 raise ValueError('Cannot transform naive geometries. '
267 'Please set a crs on the object first.')
268 if crs is None:
269 try:
270 crs = from_epsg(epsg)
271 except TypeError:
272 raise TypeError('Must set either crs or epsg for output.')
273 proj_in = pyproj.Proj(self.crs, preserve_units=True)
274 proj_out = pyproj.Proj(crs, preserve_units=True)
275 project = partial(pyproj.transform, proj_in, proj_out)
276 result = self.apply(lambda geom: transform(project, geom))
277 result.__class__ = GeoSeries
278 result.crs = crs
279 result._invalidate_sindex()
280 return result
281
282 def to_json(self, **kwargs):
283 """
284 Returns a GeoJSON string representation of the GeoSeries.
285
286 Parameters
287 ----------
288 *kwargs* that will be passed to json.dumps().
289 """
290 return json.dumps(self.__geo_interface__, **kwargs)
291
292 #
293 # Implement standard operators for GeoSeries
294 #
295
296 def __xor__(self, other):
297 """Implement ^ operator as for builtin set type"""
298 return self.symmetric_difference(other)
299
300 def __or__(self, other):
301 """Implement | operator as for builtin set type"""
302 return self.union(other)
303
304 def __and__(self, other):
305 """Implement & operator as for builtin set type"""
306 return self.intersection(other)
307
308 def __sub__(self, other):
309 """Implement - operator as for builtin set type"""
310 return self.difference(other)
311
312 GeoSeries._create_indexer('cx', _CoordinateIndexer)
313
[end of geopandas/geoseries.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/geopandas/geoseries.py b/geopandas/geoseries.py
--- a/geopandas/geoseries.py
+++ b/geopandas/geoseries.py
@@ -37,10 +37,10 @@
if xs.step is not None or ys.step is not None:
warn("Ignoring step - full interval is used.")
xmin, ymin, xmax, ymax = obj.total_bounds
- bbox = box(xs.start or xmin,
- ys.start or ymin,
- xs.stop or xmax,
- ys.stop or ymax)
+ bbox = box(xs.start if xs.start is not None else xmin,
+ ys.start if ys.start is not None else ymin,
+ xs.stop if xs.stop is not None else xmax,
+ ys.stop if ys.stop is not None else ymax)
idx = obj.intersects(bbox)
return obj[idx]
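The substitution above matters because `or` falls back on any falsy value, and a slice bound of exactly `0` is falsy. A minimal illustration with plain built-ins (variable names chosen to mirror the diff):

```python
xmin, start = -3, 0   # the user asked for a bound of exactly 0

assert (start or xmin) == -3                           # old expression: 0 silently falls back to xmin
assert (start if start is not None else xmin) == 0     # fixed expression: only None falls back
```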
| {"golden_diff": "diff --git a/geopandas/geoseries.py b/geopandas/geoseries.py\n--- a/geopandas/geoseries.py\n+++ b/geopandas/geoseries.py\n@@ -37,10 +37,10 @@\n if xs.step is not None or ys.step is not None:\n warn(\"Ignoring step - full interval is used.\")\n xmin, ymin, xmax, ymax = obj.total_bounds\n- bbox = box(xs.start or xmin,\n- ys.start or ymin,\n- xs.stop or xmax,\n- ys.stop or ymax)\n+ bbox = box(xs.start if xs.start is not None else xmin,\n+ ys.start if ys.start is not None else ymin,\n+ xs.stop if xs.stop is not None else xmax,\n+ ys.stop if ys.stop is not None else ymax)\n idx = obj.intersects(bbox)\n return obj[idx]\n", "issue": "BUG: coordinate indexer does not correctly handle slice start/stop of 0\nWhen slicing a geoseries using the coordinate indexer, a slice start/stop of 0 is ignored.\r\n\r\n```\r\n>>> gs = gpd.GeoSeries([Point(x, x) for x in range(-3, 4)])\r\n>>> print(gs)\r\n0 POINT (-3 -3)\r\n1 POINT (-2 -2)\r\n2 POINT (-1 -1)\r\n3 POINT (0 0)\r\n4 POINT (1 1)\r\n5 POINT (2 2)\r\n6 POINT (3 3)\r\ndtype: object\r\n```\r\nWhen attempting to select points located with x/y < 0, you get:\r\n```\r\n>>> print(gs.cx[:0, :0])\r\n0 POINT (-3 -3)\r\n1 POINT (-2 -2)\r\n2 POINT (-1 -1)\r\n3 POINT (0 0)\r\n4 POINT (1 1)\r\n5 POINT (2 2)\r\n6 POINT (3 3)\r\ndtype: object\r\n```\r\nwhereas the expected behavior is:\r\n```\r\n>>> print(gs.cx[:0, :0])\r\n0 POINT (-3 -3)\r\n1 POINT (-2 -2)\r\n2 POINT (-1 -1)\r\n3 POINT (0 0)\r\n```\n", "before_files": [{"content": "from functools import partial\nimport json\nfrom warnings import warn\n\nimport numpy as np\nfrom pandas import Series, DataFrame\nfrom pandas.core.indexing import _NDFrameIndexer\n\nimport pyproj\nfrom shapely.geometry import box, shape, Polygon, Point\nfrom shapely.geometry.collection import GeometryCollection\nfrom shapely.geometry.base import BaseGeometry\nfrom shapely.ops import transform\n\nfrom geopandas.plotting import plot_series\nfrom geopandas.base import GeoPandasBase, _series_unary_op\n\n\ndef _is_empty(x):\n try:\n return x.is_empty\n except:\n return False\n\n\nclass _CoordinateIndexer(_NDFrameIndexer):\n \"\"\" Indexing by coordinate slices \"\"\"\n def _getitem_tuple(self, tup):\n obj = self.obj\n xs, ys = tup\n # handle numeric values as x and/or y coordinate index\n if type(xs) is not slice:\n xs = slice(xs, xs)\n if type(ys) is not slice:\n ys = slice(ys, ys)\n # don't know how to handle step; should this raise?\n if xs.step is not None or ys.step is not None:\n warn(\"Ignoring step - full interval is used.\")\n xmin, ymin, xmax, ymax = obj.total_bounds\n bbox = box(xs.start or xmin,\n ys.start or ymin,\n xs.stop or xmax,\n ys.stop or ymax)\n idx = obj.intersects(bbox)\n return obj[idx]\n\n\nclass GeoSeries(GeoPandasBase, Series):\n \"\"\"A Series object designed to store shapely geometry objects.\"\"\"\n _metadata = ['name', 'crs']\n\n def __new__(cls, *args, **kwargs):\n kwargs.pop('crs', None)\n arr = Series.__new__(cls)\n if type(arr) is GeoSeries:\n return arr\n else:\n return arr.view(GeoSeries)\n\n def __init__(self, *args, **kwargs):\n # fix problem for scalar geometries passed\n if len(args) == 1 and isinstance(args[0], BaseGeometry):\n args = ([args[0]],)\n\n crs = kwargs.pop('crs', None)\n\n super(GeoSeries, self).__init__(*args, **kwargs)\n self.crs = crs\n self._invalidate_sindex()\n\n def append(self, *args, **kwargs):\n return self._wrapped_pandas_method('append', *args, **kwargs)\n\n @property\n def geometry(self):\n return self\n\n @property\n def x(self):\n \"\"\"Return the x location 
of point geometries in a GeoSeries\"\"\"\n if (self.geom_type == \"Point\").all():\n return _series_unary_op(self, 'x', null_value=np.nan)\n else:\n message = \"x attribute access only provided for Point geometries\"\n raise ValueError(message)\n\n @property\n def y(self):\n \"\"\"Return the y location of point geometries in a GeoSeries\"\"\"\n if (self.geom_type == \"Point\").all():\n return _series_unary_op(self, 'y', null_value=np.nan)\n else:\n message = \"y attribute access only provided for Point geometries\"\n raise ValueError(message)\n\n @classmethod\n def from_file(cls, filename, **kwargs):\n \"\"\"\n Alternate constructor to create a GeoSeries from a file\n\n Parameters\n ----------\n\n filename : str\n File path or file handle to read from. Depending on which kwargs\n are included, the content of filename may vary, see:\n http://toblerity.github.io/fiona/README.html#usage\n for usage details.\n kwargs : key-word arguments\n These arguments are passed to fiona.open, and can be used to\n access multi-layer data, data stored within archives (zip files),\n etc.\n\n \"\"\"\n import fiona\n geoms = []\n with fiona.open(filename, **kwargs) as f:\n crs = f.crs\n for rec in f:\n geoms.append(shape(rec['geometry']))\n g = GeoSeries(geoms)\n g.crs = crs\n return g\n\n @property\n def __geo_interface__(self):\n \"\"\"Returns a GeoSeries as a python feature collection\n \"\"\"\n from geopandas import GeoDataFrame\n return GeoDataFrame({'geometry': self}).__geo_interface__\n\n def to_file(self, filename, driver=\"ESRI Shapefile\", **kwargs):\n from geopandas import GeoDataFrame\n data = GeoDataFrame({\"geometry\": self,\n \"id\":self.index.values},\n index=self.index)\n data.crs = self.crs\n data.to_file(filename, driver, **kwargs)\n\n #\n # Implement pandas methods\n #\n\n @property\n def _constructor(self):\n return GeoSeries\n\n def _wrapped_pandas_method(self, mtd, *args, **kwargs):\n \"\"\"Wrap a generic pandas method to ensure it returns a GeoSeries\"\"\"\n val = getattr(super(GeoSeries, self), mtd)(*args, **kwargs)\n if type(val) == Series:\n val.__class__ = GeoSeries\n val.crs = self.crs\n val._invalidate_sindex()\n return val\n\n def __getitem__(self, key):\n return self._wrapped_pandas_method('__getitem__', key)\n\n def sort_index(self, *args, **kwargs):\n return self._wrapped_pandas_method('sort_index', *args, **kwargs)\n\n def take(self, *args, **kwargs):\n return self._wrapped_pandas_method('take', *args, **kwargs)\n\n def select(self, *args, **kwargs):\n return self._wrapped_pandas_method('select', *args, **kwargs)\n\n @property\n def _can_hold_na(self):\n return False\n\n def __finalize__(self, other, method=None, **kwargs):\n \"\"\" propagate metadata from other to self \"\"\"\n # NOTE: backported from pandas master (upcoming v0.13)\n for name in self._metadata:\n object.__setattr__(self, name, getattr(other, name, None))\n return self\n\n def copy(self, order='C'):\n \"\"\"\n Make a copy of this GeoSeries object\n\n Parameters\n ----------\n deep : boolean, default True\n Make a deep copy, i.e. 
also copy data\n\n Returns\n -------\n copy : GeoSeries\n \"\"\"\n # FIXME: this will likely be unnecessary in pandas >= 0.13\n return GeoSeries(self.values.copy(order), index=self.index,\n name=self.name).__finalize__(self)\n\n def isnull(self):\n \"\"\"Null values in a GeoSeries are represented by empty geometric objects\"\"\"\n non_geo_null = super(GeoSeries, self).isnull()\n val = self.apply(_is_empty)\n return np.logical_or(non_geo_null, val)\n\n def fillna(self, value=None, method=None, inplace=False,\n **kwargs):\n \"\"\"Fill NA/NaN values with a geometry (empty polygon by default).\n\n \"method\" is currently not implemented for pandas <= 0.12.\n \"\"\"\n if value is None:\n value = Point()\n return super(GeoSeries, self).fillna(value=value, method=method,\n inplace=inplace, **kwargs)\n\n def align(self, other, join='outer', level=None, copy=True,\n fill_value=None, **kwargs):\n if fill_value is None:\n fill_value = Point()\n left, right = super(GeoSeries, self).align(other, join=join,\n level=level, copy=copy,\n fill_value=fill_value,\n **kwargs)\n if isinstance(other, GeoSeries):\n return GeoSeries(left), GeoSeries(right)\n else: # It is probably a Series, let's keep it that way\n return GeoSeries(left), right\n\n\n def __contains__(self, other):\n \"\"\"Allow tests of the form \"geom in s\"\n\n Tests whether a GeoSeries contains a geometry.\n\n Note: This is not the same as the geometric method \"contains\".\n \"\"\"\n if isinstance(other, BaseGeometry):\n return np.any(self.geom_equals(other))\n else:\n return False\n\n def plot(self, *args, **kwargs):\n return plot_series(self, *args, **kwargs)\n\n plot.__doc__ = plot_series.__doc__\n\n #\n # Additional methods\n #\n\n def to_crs(self, crs=None, epsg=None):\n \"\"\"Transform geometries to a new coordinate reference system\n\n This method will transform all points in all objects. It has\n no notion or projecting entire geometries. All segments\n joining points are assumed to be lines in the current\n projection, not geodesics. Objects crossing the dateline (or\n other projection boundary) will have undesirable behavior.\n\n `to_crs` passes the `crs` argument to the `Proj` function from the\n `pyproj` library (with the option `preserve_units=True`). It can\n therefore accept proj4 projections in any format\n supported by `Proj`, including dictionaries, or proj4 strings.\n\n \"\"\"\n from fiona.crs import from_epsg\n if self.crs is None:\n raise ValueError('Cannot transform naive geometries. 
'\n 'Please set a crs on the object first.')\n if crs is None:\n try:\n crs = from_epsg(epsg)\n except TypeError:\n raise TypeError('Must set either crs or epsg for output.')\n proj_in = pyproj.Proj(self.crs, preserve_units=True)\n proj_out = pyproj.Proj(crs, preserve_units=True)\n project = partial(pyproj.transform, proj_in, proj_out)\n result = self.apply(lambda geom: transform(project, geom))\n result.__class__ = GeoSeries\n result.crs = crs\n result._invalidate_sindex()\n return result\n\n def to_json(self, **kwargs):\n \"\"\"\n Returns a GeoJSON string representation of the GeoSeries.\n\n Parameters\n ----------\n *kwargs* that will be passed to json.dumps().\n \"\"\"\n return json.dumps(self.__geo_interface__, **kwargs)\n\n #\n # Implement standard operators for GeoSeries\n #\n\n def __xor__(self, other):\n \"\"\"Implement ^ operator as for builtin set type\"\"\"\n return self.symmetric_difference(other)\n\n def __or__(self, other):\n \"\"\"Implement | operator as for builtin set type\"\"\"\n return self.union(other)\n\n def __and__(self, other):\n \"\"\"Implement & operator as for builtin set type\"\"\"\n return self.intersection(other)\n\n def __sub__(self, other):\n \"\"\"Implement - operator as for builtin set type\"\"\"\n return self.difference(other)\n\nGeoSeries._create_indexer('cx', _CoordinateIndexer)\n", "path": "geopandas/geoseries.py"}]} | 4,009 | 191 |
gh_patches_debug_6399 | rasdani/github-patches | git_diff | facebookresearch__hydra-277 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Clean up --cfg
It will be cleaner for --cfg to always take one of job|hydra|all, and not have it default to job.
this will eliminate the problem that occures when --cfg is not the last flag in the command line and some override is associated with --cfg.
</issue>
<code>
[start of hydra/_internal/utils.py]
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 import inspect
3 import os
4 import sys
5 from .hydra import Hydra
6 import argparse
7
8
9 def run_hydra(args_parser, task_function, config_path, strict):
10 stack = inspect.stack()
11 frame = stack[2]
12
13 calling_file = None
14 calling__module = None
15 try:
16 calling_file = frame[0].f_locals["__file__"]
17 except KeyError:
18 pass
19 try:
20 module_envs = ["HYDRA_MAIN_MODULE", "FB_PAR_MAIN_MODULE", "FB_XAR_MAIN_MODULE"]
21 for module_env in module_envs:
22 if module_env in os.environ:
23 calling__module = os.environ[module_env]
24 break
25
26 if calling__module is None:
27 calling__module = frame[0].f_globals[frame[3]].__module__
28 except KeyError:
29 pass
30
31 hydra = Hydra(
32 calling_file=calling_file,
33 calling_module=calling__module,
34 config_path=config_path,
35 task_function=task_function,
36 strict=strict,
37 )
38
39 args = args_parser.parse_args()
40 if args.help:
41 hydra.app_help(args_parser=args_parser, args=args)
42 sys.exit(0)
43 if args.hydra_help:
44 hydra.hydra_help(args_parser=args_parser, args=args)
45 sys.exit(0)
46
47 has_show_cfg = args.cfg is not None
48 num_commands = args.run + has_show_cfg + args.multirun + args.shell_completion
49 if num_commands > 1:
50 raise ValueError(
51 "Only one of --run, --multirun, -cfg and --shell_completion can be specified"
52 )
53 if num_commands == 0:
54 args.run = True
55 if args.run:
56 hydra.run(overrides=args.overrides)
57 elif args.multirun:
58 hydra.multirun(overrides=args.overrides)
59 elif args.cfg:
60 hydra.show_cfg(overrides=args.overrides, cfg_type=args.cfg)
61 elif args.shell_completion:
62 hydra.shell_completion(overrides=args.overrides)
63 else:
64 print("Command not specified")
65 sys.exit(1)
66
67
68 def _get_exec_command():
69 if sys.argv[0].endswith(".py"):
70 return "python {}".format(sys.argv[0])
71 else:
72 # Running as an installed app (setuptools entry point)
73 executable = os.path.basename(sys.argv[0])
74 return executable
75
76
77 def get_args_parser():
78 from .. import __version__
79
80 parser = argparse.ArgumentParser(add_help=False, description="Hydra")
81 parser.add_argument("--help", "-h", action="store_true", help="Application's help")
82 parser.add_argument("--hydra-help", action="store_true", help="Hydra's help")
83 parser.add_argument(
84 "--version", action="version", version="Hydra {}".format(__version__)
85 )
86 parser.add_argument(
87 "overrides",
88 nargs="*",
89 help="Any key=value arguments to override config values (use dots for.nested=overrides)",
90 )
91
92 parser.add_argument(
93 "--cfg",
94 "-c",
95 const="job",
96 nargs="?",
97 choices=["job", "hydra", "all"],
98 help="Show config instead of running, optional value indicates which config to show (defaults to job)",
99 )
100
101 parser.add_argument("--run", "-r", action="store_true", help="Run a job")
102
103 parser.add_argument(
104 "--multirun",
105 "-m",
106 action="store_true",
107 help="Run multiple jobs with the configured launcher",
108 )
109
110 shell = "SHELL_NAME"
111 install_cmd = 'eval "$({} -sc install={})"'.format(_get_exec_command(), shell)
112 uninstall_cmd = 'eval "$({} -sc uninstall={})"'.format(_get_exec_command(), shell)
113 parser.add_argument(
114 "--shell_completion",
115 "-sc",
116 action="store_true",
117 help="""Install or Uninstall shell completion:
118 Install:
119 {}
120
121 Uninstall:
122 {}
123 """.format(
124 install_cmd, uninstall_cmd
125 ),
126 )
127 return parser
128
129
130 def get_args(args=None):
131 return get_args_parser().parse_args(args=args)
132
[end of hydra/_internal/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/hydra/_internal/utils.py b/hydra/_internal/utils.py
--- a/hydra/_internal/utils.py
+++ b/hydra/_internal/utils.py
@@ -92,10 +92,8 @@
parser.add_argument(
"--cfg",
"-c",
- const="job",
- nargs="?",
choices=["job", "hydra", "all"],
- help="Show config instead of running, optional value indicates which config to show (defaults to job)",
+ help="Show config instead of running [job|hydra|all]",
)
parser.add_argument("--run", "-r", action="store_true", help="Run a job")
| {"golden_diff": "diff --git a/hydra/_internal/utils.py b/hydra/_internal/utils.py\n--- a/hydra/_internal/utils.py\n+++ b/hydra/_internal/utils.py\n@@ -92,10 +92,8 @@\n parser.add_argument(\n \"--cfg\",\n \"-c\",\n- const=\"job\",\n- nargs=\"?\",\n choices=[\"job\", \"hydra\", \"all\"],\n- help=\"Show config instead of running, optional value indicates which config to show (defaults to job)\",\n+ help=\"Show config instead of running [job|hydra|all]\",\n )\n \n parser.add_argument(\"--run\", \"-r\", action=\"store_true\", help=\"Run a job\")\n", "issue": "Clean up --cfg\nIt will be cleaner for --cfg to always take one of job|hydra|all, and not have it default to job.\r\nthis will eliminate the problem that occures when --cfg is not the last flag in the command line and some override is associated with --cfg.\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport inspect\nimport os\nimport sys\nfrom .hydra import Hydra\nimport argparse\n\n\ndef run_hydra(args_parser, task_function, config_path, strict):\n stack = inspect.stack()\n frame = stack[2]\n\n calling_file = None\n calling__module = None\n try:\n calling_file = frame[0].f_locals[\"__file__\"]\n except KeyError:\n pass\n try:\n module_envs = [\"HYDRA_MAIN_MODULE\", \"FB_PAR_MAIN_MODULE\", \"FB_XAR_MAIN_MODULE\"]\n for module_env in module_envs:\n if module_env in os.environ:\n calling__module = os.environ[module_env]\n break\n\n if calling__module is None:\n calling__module = frame[0].f_globals[frame[3]].__module__\n except KeyError:\n pass\n\n hydra = Hydra(\n calling_file=calling_file,\n calling_module=calling__module,\n config_path=config_path,\n task_function=task_function,\n strict=strict,\n )\n\n args = args_parser.parse_args()\n if args.help:\n hydra.app_help(args_parser=args_parser, args=args)\n sys.exit(0)\n if args.hydra_help:\n hydra.hydra_help(args_parser=args_parser, args=args)\n sys.exit(0)\n\n has_show_cfg = args.cfg is not None\n num_commands = args.run + has_show_cfg + args.multirun + args.shell_completion\n if num_commands > 1:\n raise ValueError(\n \"Only one of --run, --multirun, -cfg and --shell_completion can be specified\"\n )\n if num_commands == 0:\n args.run = True\n if args.run:\n hydra.run(overrides=args.overrides)\n elif args.multirun:\n hydra.multirun(overrides=args.overrides)\n elif args.cfg:\n hydra.show_cfg(overrides=args.overrides, cfg_type=args.cfg)\n elif args.shell_completion:\n hydra.shell_completion(overrides=args.overrides)\n else:\n print(\"Command not specified\")\n sys.exit(1)\n\n\ndef _get_exec_command():\n if sys.argv[0].endswith(\".py\"):\n return \"python {}\".format(sys.argv[0])\n else:\n # Running as an installed app (setuptools entry point)\n executable = os.path.basename(sys.argv[0])\n return executable\n\n\ndef get_args_parser():\n from .. 
import __version__\n\n parser = argparse.ArgumentParser(add_help=False, description=\"Hydra\")\n parser.add_argument(\"--help\", \"-h\", action=\"store_true\", help=\"Application's help\")\n parser.add_argument(\"--hydra-help\", action=\"store_true\", help=\"Hydra's help\")\n parser.add_argument(\n \"--version\", action=\"version\", version=\"Hydra {}\".format(__version__)\n )\n parser.add_argument(\n \"overrides\",\n nargs=\"*\",\n help=\"Any key=value arguments to override config values (use dots for.nested=overrides)\",\n )\n\n parser.add_argument(\n \"--cfg\",\n \"-c\",\n const=\"job\",\n nargs=\"?\",\n choices=[\"job\", \"hydra\", \"all\"],\n help=\"Show config instead of running, optional value indicates which config to show (defaults to job)\",\n )\n\n parser.add_argument(\"--run\", \"-r\", action=\"store_true\", help=\"Run a job\")\n\n parser.add_argument(\n \"--multirun\",\n \"-m\",\n action=\"store_true\",\n help=\"Run multiple jobs with the configured launcher\",\n )\n\n shell = \"SHELL_NAME\"\n install_cmd = 'eval \"$({} -sc install={})\"'.format(_get_exec_command(), shell)\n uninstall_cmd = 'eval \"$({} -sc uninstall={})\"'.format(_get_exec_command(), shell)\n parser.add_argument(\n \"--shell_completion\",\n \"-sc\",\n action=\"store_true\",\n help=\"\"\"Install or Uninstall shell completion:\n Install:\n {}\n\n Uninstall:\n {}\n\"\"\".format(\n install_cmd, uninstall_cmd\n ),\n )\n return parser\n\n\ndef get_args(args=None):\n return get_args_parser().parse_args(args=args)\n", "path": "hydra/_internal/utils.py"}]} | 1,803 | 152 |
gh_patches_debug_33836 | rasdani/github-patches | git_diff | bridgecrewio__checkov-1166 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unable to see any CKV2 checks in the list
**Describe the bug**
I posted this on Slack first and was confirmed it's a bug. I can't see any of the CKV2 checks when running `checkov -l`
**Expected behavior**
CKV2 / graph checks should be present working.
**Screenshots**

**Desktop (please complete the following information):**
- OS: os X
- Checkov Version 2.0.107
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 import logging
3 import os
4 from importlib import util
5 from os import path
6
7 import setuptools
8 from setuptools import setup
9
10 # read the contents of your README file
11 this_directory = path.abspath(path.dirname(__file__))
12 with open(path.join(this_directory, "README.md"), encoding="utf-8") as f:
13 long_description = f.read()
14
15 logger = logging.getLogger(__name__)
16 spec = util.spec_from_file_location(
17 "checkov.version", os.path.join("checkov", "version.py")
18 )
19 # noinspection PyUnresolvedReferences
20 mod = util.module_from_spec(spec)
21 spec.loader.exec_module(mod) # type: ignore
22 version = mod.version # type: ignore
23
24 setup(
25 extras_require={
26 "dev": [
27 "pytest==5.3.1",
28 "coverage",
29 "coverage-badge",
30 "GitPython==3.1.7",
31 "bandit"
32 ]
33 },
34 install_requires=[
35 "bc-python-hcl2>=0.3.18",
36 "cloudsplaining>=0.4.1",
37 "deep_merge",
38 "tabulate",
39 "colorama",
40 "termcolor",
41 "junit-xml",
42 "dpath>=1.5.0,<2",
43 "pyyaml>=5.4.1",
44 "boto3==1.17.27",
45 "GitPython",
46 "six==1.15.0",
47 "jmespath",
48 "tqdm",
49 "update_checker",
50 "semantic_version",
51 "packaging",
52 "networkx",
53 "dockerfile-parse",
54 "docker"
55 ],
56 license="Apache License 2.0",
57 name="checkov",
58 version=version,
59 python_requires=">=3.7",
60 description="Infrastructure as code static analysis",
61 author="bridgecrew",
62 author_email="[email protected]",
63 url="https://github.com/nimrodkor/checkov",
64 packages=setuptools.find_packages(exclude=["tests*","integration_tests*"]),
65 scripts=["bin/checkov", "bin/checkov.cmd"],
66 long_description=long_description,
67 long_description_content_type="text/markdown",
68 classifiers=[
69 'Environment :: Console',
70 'Intended Audience :: Developers',
71 'Intended Audience :: System Administrators',
72 'Programming Language :: Python :: 3.7',
73 'Programming Language :: Python :: 3.8',
74 'Programming Language :: Python :: 3.9',
75 'Topic :: Security',
76 'Topic :: Software Development :: Build Tools'
77 ]
78 )
79
[end of setup.py]
[start of checkov/terraform/checks_infra/registry.py]
1 import json
2 import logging
3 import os
4
5 import yaml
6
7 from checkov.common.graph.checks_infra.base_parser import BaseGraphCheckParser
8 from checkov.common.graph.checks_infra.registry import BaseRegistry
9 from checkov.terraform.checks_infra.resources_types import resources_types
10
11 CHECKS_POSSIBLE_ENDING = [".yaml", ".yml"]
12
13
14 class Registry(BaseRegistry):
15 def __init__(self, parser=BaseGraphCheckParser(), checks_dir=None):
16 super().__init__(parser)
17 self.checks = []
18 self.parser = parser
19 self.checks_dir = checks_dir if checks_dir else \
20 os.path.join(os.path.dirname(os.path.dirname(__file__)), "checks", "graph_checks")
21 self.logger = logging.getLogger(__name__)
22
23 def load_checks(self):
24 self._load_checks_from_dir(self.checks_dir)
25
26 def _load_checks_from_dir(self, directory: str):
27 dir = os.path.expanduser(directory)
28 self.logger.debug("Loading external checks from {}".format(dir))
29 for root, d_names, f_names in os.walk(dir):
30 for file in f_names:
31 file_ending = os.path.splitext(file)[1]
32 if file_ending in CHECKS_POSSIBLE_ENDING:
33 with open(f'{root}/{file}', "r") as f:
34 if dir != self.checks_dir:
35 # This is a custom check, log its loading
36 logging.info(f"loading {file}")
37 check_yaml = yaml.safe_load(f)
38 check_json = json.loads(json.dumps(check_yaml))
39 check = self.parser.parse_raw_check(check_json, resources_types=self._get_resource_types(check_json))
40 if not any([c for c in self.checks if check.id == c.id]):
41 self.checks.append(check)
42
43 def load_external_checks(self, dir: str):
44 self._load_checks_from_dir(dir)
45
46 @staticmethod
47 def _get_resource_types(check_json):
48 provider = check_json.get("scope", {}).get("provider", "").lower()
49 return resources_types.get(provider)
50
[end of checkov/terraform/checks_infra/registry.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checkov/terraform/checks_infra/registry.py b/checkov/terraform/checks_infra/registry.py
--- a/checkov/terraform/checks_infra/registry.py
+++ b/checkov/terraform/checks_infra/registry.py
@@ -25,15 +25,20 @@
def _load_checks_from_dir(self, directory: str):
dir = os.path.expanduser(directory)
- self.logger.debug("Loading external checks from {}".format(dir))
+
+ checks_dir_content = os.listdir(os.path.dirname(dir))
+ self.logger.info(f'Checks dir contents: {checks_dir_content}')
+
+ self.logger.info("Loading external checks from {}".format(dir))
for root, d_names, f_names in os.walk(dir):
+ self.logger.info(f'Searching through {d_names} and {f_names}')
for file in f_names:
file_ending = os.path.splitext(file)[1]
if file_ending in CHECKS_POSSIBLE_ENDING:
with open(f'{root}/{file}', "r") as f:
- if dir != self.checks_dir:
+ # if dir != self.checks_dir:
# This is a custom check, log its loading
- logging.info(f"loading {file}")
+ self.logger.info(f"loading {file}")
check_yaml = yaml.safe_load(f)
check_json = json.loads(json.dumps(check_yaml))
check = self.parser.parse_raw_check(check_json, resources_types=self._get_resource_types(check_json))
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -62,6 +62,8 @@
author_email="[email protected]",
url="https://github.com/nimrodkor/checkov",
packages=setuptools.find_packages(exclude=["tests*","integration_tests*"]),
+ include_package_data=True,
+ package_data = {'': ['*.yaml', '*.yml']},
scripts=["bin/checkov", "bin/checkov.cmd"],
long_description=long_description,
long_description_content_type="text/markdown",
| {"golden_diff": "diff --git a/checkov/terraform/checks_infra/registry.py b/checkov/terraform/checks_infra/registry.py\n--- a/checkov/terraform/checks_infra/registry.py\n+++ b/checkov/terraform/checks_infra/registry.py\n@@ -25,15 +25,20 @@\n \n def _load_checks_from_dir(self, directory: str):\n dir = os.path.expanduser(directory)\n- self.logger.debug(\"Loading external checks from {}\".format(dir))\n+\n+ checks_dir_content = os.listdir(os.path.dirname(dir))\n+ self.logger.info(f'Checks dir contents: {checks_dir_content}')\n+\n+ self.logger.info(\"Loading external checks from {}\".format(dir))\n for root, d_names, f_names in os.walk(dir):\n+ self.logger.info(f'Searching through {d_names} and {f_names}')\n for file in f_names:\n file_ending = os.path.splitext(file)[1]\n if file_ending in CHECKS_POSSIBLE_ENDING:\n with open(f'{root}/{file}', \"r\") as f:\n- if dir != self.checks_dir:\n+ # if dir != self.checks_dir:\n # This is a custom check, log its loading\n- logging.info(f\"loading {file}\")\n+ self.logger.info(f\"loading {file}\")\n check_yaml = yaml.safe_load(f)\n check_json = json.loads(json.dumps(check_yaml))\n check = self.parser.parse_raw_check(check_json, resources_types=self._get_resource_types(check_json))\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -62,6 +62,8 @@\n author_email=\"[email protected]\",\n url=\"https://github.com/nimrodkor/checkov\",\n packages=setuptools.find_packages(exclude=[\"tests*\",\"integration_tests*\"]),\n+ include_package_data=True,\n+ package_data = {'': ['*.yaml', '*.yml']},\n scripts=[\"bin/checkov\", \"bin/checkov.cmd\"],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n", "issue": "Unable to see any CKV2 checks in the list\n**Describe the bug**\r\nI posted this on Slack first and was confirmed it's a bug. 
I can't see any of the CKV2 checks when running `checkov -l`\r\n\r\n**Expected behavior**\r\nCKV2 / graph checks should be present working.\r\n\r\n**Screenshots**\r\n\r\n\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: os X\r\n - Checkov Version 2.0.107\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport logging\nimport os\nfrom importlib import util\nfrom os import path\n\nimport setuptools\nfrom setuptools import setup\n\n# read the contents of your README file\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nlogger = logging.getLogger(__name__)\nspec = util.spec_from_file_location(\n \"checkov.version\", os.path.join(\"checkov\", \"version.py\")\n)\n# noinspection PyUnresolvedReferences\nmod = util.module_from_spec(spec)\nspec.loader.exec_module(mod) # type: ignore\nversion = mod.version # type: ignore\n\nsetup(\n extras_require={\n \"dev\": [\n \"pytest==5.3.1\",\n \"coverage\",\n \"coverage-badge\",\n \"GitPython==3.1.7\",\n \"bandit\"\n ]\n },\n install_requires=[\n \"bc-python-hcl2>=0.3.18\",\n \"cloudsplaining>=0.4.1\",\n \"deep_merge\",\n \"tabulate\",\n \"colorama\",\n \"termcolor\",\n \"junit-xml\",\n \"dpath>=1.5.0,<2\",\n \"pyyaml>=5.4.1\",\n \"boto3==1.17.27\",\n \"GitPython\",\n \"six==1.15.0\",\n \"jmespath\",\n \"tqdm\",\n \"update_checker\",\n \"semantic_version\",\n \"packaging\",\n \"networkx\",\n \"dockerfile-parse\",\n \"docker\"\n ],\n license=\"Apache License 2.0\",\n name=\"checkov\",\n version=version,\n python_requires=\">=3.7\",\n description=\"Infrastructure as code static analysis\",\n author=\"bridgecrew\",\n author_email=\"[email protected]\",\n url=\"https://github.com/nimrodkor/checkov\",\n packages=setuptools.find_packages(exclude=[\"tests*\",\"integration_tests*\"]),\n scripts=[\"bin/checkov\", \"bin/checkov.cmd\"],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n classifiers=[\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Topic :: Security',\n 'Topic :: Software Development :: Build Tools'\n ]\n)\n", "path": "setup.py"}, {"content": "import json\nimport logging\nimport os\n\nimport yaml\n\nfrom checkov.common.graph.checks_infra.base_parser import BaseGraphCheckParser\nfrom checkov.common.graph.checks_infra.registry import BaseRegistry\nfrom checkov.terraform.checks_infra.resources_types import resources_types\n\nCHECKS_POSSIBLE_ENDING = [\".yaml\", \".yml\"]\n\n\nclass Registry(BaseRegistry):\n def __init__(self, parser=BaseGraphCheckParser(), checks_dir=None):\n super().__init__(parser)\n self.checks = []\n self.parser = parser\n self.checks_dir = checks_dir if checks_dir else \\\n os.path.join(os.path.dirname(os.path.dirname(__file__)), \"checks\", \"graph_checks\")\n self.logger = logging.getLogger(__name__)\n\n def load_checks(self):\n self._load_checks_from_dir(self.checks_dir)\n\n def _load_checks_from_dir(self, directory: str):\n dir = os.path.expanduser(directory)\n self.logger.debug(\"Loading external checks from {}\".format(dir))\n for root, d_names, f_names in os.walk(dir):\n for file in f_names:\n file_ending = os.path.splitext(file)[1]\n if file_ending in CHECKS_POSSIBLE_ENDING:\n with open(f'{root}/{file}', \"r\") as f:\n if dir != 
self.checks_dir:\n # This is a custom check, log its loading\n logging.info(f\"loading {file}\")\n check_yaml = yaml.safe_load(f)\n check_json = json.loads(json.dumps(check_yaml))\n check = self.parser.parse_raw_check(check_json, resources_types=self._get_resource_types(check_json))\n if not any([c for c in self.checks if check.id == c.id]):\n self.checks.append(check)\n\n def load_external_checks(self, dir: str):\n self._load_checks_from_dir(dir)\n\n @staticmethod\n def _get_resource_types(check_json):\n provider = check_json.get(\"scope\", {}).get(\"provider\", \"\").lower()\n return resources_types.get(provider)\n", "path": "checkov/terraform/checks_infra/registry.py"}]} | 1,953 | 453 |
gh_patches_debug_801 | rasdani/github-patches | git_diff | google__flax-2407 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Outdated `rich` dependency version
The version of `rich` is currently limited to `rich~=11.1`, causing problems with `pip` dependency resolution when installing with other packages.
https://github.com/google/flax/blob/cda7a4c85bbce744e412ab82e298ddf76d4770d2/setup.py#L33
Should be a trivial fix since `flax.linen.summary` doesn't seem to need any changes, I'll open a PR.
</issue>
<code>
[start of setup.py]
1 # Copyright 2022 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """setup.py for Flax."""
16
17 import os
18 from setuptools import find_packages
19 from setuptools import setup
20
21 here = os.path.abspath(os.path.dirname(__file__))
22 try:
23 README = open(os.path.join(here, "README.md"), encoding="utf-8").read()
24 except OSError:
25 README = ""
26
27 install_requires = [
28 "numpy>=1.12",
29 "jax>=0.3.16",
30 "matplotlib", # only needed for tensorboard export
31 "msgpack",
32 "optax",
33 "rich~=11.1",
34 "typing_extensions>=4.1.1",
35 "PyYAML>=5.4.1",
36 ]
37
38 tests_require = [
39 "atari-py==0.2.5", # Last version does not have the ROMs we test on pre-packaged
40 "clu", # All examples.
41 "gym==0.18.3",
42 "jaxlib",
43 "jraph>=0.0.6dev0",
44 "ml-collections",
45 "opencv-python",
46 "pytest",
47 "pytest-cov",
48 "pytest-custom_exit_code",
49 "pytest-xdist==1.34.0", # upgrading to 2.0 broke tests, need to investigate
50 "pytype",
51 "sentencepiece", # WMT example.
52 "svn",
53 "tensorflow_text>=2.4.0", # WMT example.
54 "tensorflow_datasets",
55 "tensorflow",
56 "torch",
57 ]
58
59 __version__ = None
60
61 with open("flax/version.py") as f:
62 exec(f.read(), globals())
63
64 setup(
65 name="flax",
66 version=__version__,
67 description="Flax: A neural network library for JAX designed for flexibility",
68 long_description="\n\n".join([README]),
69 long_description_content_type="text/markdown",
70 classifiers=[
71 "Development Status :: 3 - Alpha",
72 "Intended Audience :: Developers",
73 "Intended Audience :: Science/Research",
74 "License :: OSI Approved :: Apache Software License",
75 "Programming Language :: Python :: 3.7",
76 "Topic :: Scientific/Engineering :: Artificial Intelligence",
77 ],
78 keywords="",
79 author="Flax team",
80 author_email="[email protected]",
81 url="https://github.com/google/flax",
82 packages=find_packages(),
83 package_data={"flax": ["py.typed"]},
84 zip_safe=False,
85 install_requires=install_requires,
86 extras_require={
87 "testing": tests_require,
88 },
89 )
90
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -30,7 +30,7 @@
"matplotlib", # only needed for tensorboard export
"msgpack",
"optax",
- "rich~=11.1",
+ "rich>=11.1",
"typing_extensions>=4.1.1",
"PyYAML>=5.4.1",
]
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -30,7 +30,7 @@\n \"matplotlib\", # only needed for tensorboard export\n \"msgpack\",\n \"optax\",\n- \"rich~=11.1\",\n+ \"rich>=11.1\",\n \"typing_extensions>=4.1.1\",\n \"PyYAML>=5.4.1\",\n ]\n", "issue": "Outdated `rich` dependency version\nThe version of `rich` is currently limited to `rich~=11.1`, causing problems with `pip` dependency resolution when installing with other packages.\r\n\r\nhttps://github.com/google/flax/blob/cda7a4c85bbce744e412ab82e298ddf76d4770d2/setup.py#L33\r\n\r\nShould be a trivial fix since `flax.linen.summary` doesn't seem to need any changes, I'll open a PR.\r\n\n", "before_files": [{"content": "# Copyright 2022 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"setup.py for Flax.\"\"\"\n\nimport os\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nhere = os.path.abspath(os.path.dirname(__file__))\ntry:\n README = open(os.path.join(here, \"README.md\"), encoding=\"utf-8\").read()\nexcept OSError:\n README = \"\"\n\ninstall_requires = [\n \"numpy>=1.12\",\n \"jax>=0.3.16\",\n \"matplotlib\", # only needed for tensorboard export\n \"msgpack\",\n \"optax\",\n \"rich~=11.1\",\n \"typing_extensions>=4.1.1\",\n \"PyYAML>=5.4.1\",\n]\n\ntests_require = [\n \"atari-py==0.2.5\", # Last version does not have the ROMs we test on pre-packaged\n \"clu\", # All examples.\n \"gym==0.18.3\",\n \"jaxlib\",\n \"jraph>=0.0.6dev0\",\n \"ml-collections\",\n \"opencv-python\",\n \"pytest\",\n \"pytest-cov\",\n \"pytest-custom_exit_code\",\n \"pytest-xdist==1.34.0\", # upgrading to 2.0 broke tests, need to investigate\n \"pytype\",\n \"sentencepiece\", # WMT example.\n \"svn\",\n \"tensorflow_text>=2.4.0\", # WMT example.\n \"tensorflow_datasets\",\n \"tensorflow\",\n \"torch\",\n]\n\n__version__ = None\n\nwith open(\"flax/version.py\") as f:\n exec(f.read(), globals())\n\nsetup(\n name=\"flax\",\n version=__version__,\n description=\"Flax: A neural network library for JAX designed for flexibility\",\n long_description=\"\\n\\n\".join([README]),\n long_description_content_type=\"text/markdown\",\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n keywords=\"\",\n author=\"Flax team\",\n author_email=\"[email protected]\",\n url=\"https://github.com/google/flax\",\n packages=find_packages(),\n package_data={\"flax\": [\"py.typed\"]},\n zip_safe=False,\n install_requires=install_requires,\n extras_require={\n \"testing\": tests_require,\n },\n )\n", "path": "setup.py"}]} | 1,502 | 99 |
gh_patches_debug_7456 | rasdani/github-patches | git_diff | encode__httpx-421 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
HTTPError should be importable frop the top-level httpx package
From #365:
> `HTTPError` is not available at the top level like the other exceptions and like it was in requests. This is a somewhat common exception to catch raise_for_status, so having to add another import statement for it is a bit odd if intentional.
Put differently, `HTTPError` is missing from `httpx/__init__.py`. Adding it would allow us to do:
```python
import httpx
try:
r = httpx.get("https://example.org")
r.raise_for_status()
except httpx.HTTPError:
...
```
</issue>
<code>
[start of httpx/__init__.py]
1 from .__version__ import __description__, __title__, __version__
2 from .api import delete, get, head, options, patch, post, put, request
3 from .client import AsyncClient, Client
4 from .concurrency.asyncio import AsyncioBackend
5 from .concurrency.base import (
6 BaseBackgroundManager,
7 BasePoolSemaphore,
8 BaseTCPStream,
9 ConcurrencyBackend,
10 )
11 from .config import (
12 USER_AGENT,
13 CertTypes,
14 HTTPVersionConfig,
15 HTTPVersionTypes,
16 PoolLimits,
17 SSLConfig,
18 TimeoutConfig,
19 TimeoutTypes,
20 VerifyTypes,
21 )
22 from .dispatch.base import AsyncDispatcher, Dispatcher
23 from .dispatch.connection import HTTPConnection
24 from .dispatch.connection_pool import ConnectionPool
25 from .dispatch.proxy_http import HTTPProxy, HTTPProxyMode
26 from .exceptions import (
27 ConnectTimeout,
28 CookieConflict,
29 DecodingError,
30 InvalidURL,
31 NotRedirectResponse,
32 PoolTimeout,
33 ProtocolError,
34 ProxyError,
35 ReadTimeout,
36 RedirectBodyUnavailable,
37 RedirectLoop,
38 ResponseClosed,
39 ResponseNotRead,
40 StreamConsumed,
41 Timeout,
42 TooManyRedirects,
43 WriteTimeout,
44 )
45 from .middleware.digest_auth import DigestAuth
46 from .models import (
47 URL,
48 AsyncRequest,
49 AsyncRequestData,
50 AsyncResponse,
51 AsyncResponseContent,
52 AuthTypes,
53 Cookies,
54 CookieTypes,
55 Headers,
56 HeaderTypes,
57 Origin,
58 QueryParams,
59 QueryParamTypes,
60 Request,
61 RequestData,
62 RequestFiles,
63 Response,
64 ResponseContent,
65 URLTypes,
66 )
67 from .status_codes import StatusCode, codes
68
69 __all__ = [
70 "__description__",
71 "__title__",
72 "__version__",
73 "delete",
74 "get",
75 "head",
76 "options",
77 "patch",
78 "post",
79 "patch",
80 "put",
81 "request",
82 "AsyncClient",
83 "Client",
84 "AsyncioBackend",
85 "USER_AGENT",
86 "CertTypes",
87 "PoolLimits",
88 "SSLConfig",
89 "TimeoutConfig",
90 "VerifyTypes",
91 "HTTPConnection",
92 "BasePoolSemaphore",
93 "BaseBackgroundManager",
94 "ConnectionPool",
95 "HTTPProxy",
96 "HTTPProxyMode",
97 "ConnectTimeout",
98 "CookieConflict",
99 "DecodingError",
100 "InvalidURL",
101 "NotRedirectResponse",
102 "PoolTimeout",
103 "ProtocolError",
104 "ReadTimeout",
105 "RedirectBodyUnavailable",
106 "RedirectLoop",
107 "ResponseClosed",
108 "ResponseNotRead",
109 "StreamConsumed",
110 "ProxyError",
111 "Timeout",
112 "TooManyRedirects",
113 "WriteTimeout",
114 "AsyncDispatcher",
115 "BaseTCPStream",
116 "ConcurrencyBackend",
117 "Dispatcher",
118 "URL",
119 "URLTypes",
120 "StatusCode",
121 "codes",
122 "TimeoutTypes",
123 "HTTPVersionTypes",
124 "HTTPVersionConfig",
125 "AsyncRequest",
126 "AsyncRequestData",
127 "AsyncResponse",
128 "AsyncResponseContent",
129 "AuthTypes",
130 "Cookies",
131 "CookieTypes",
132 "Headers",
133 "HeaderTypes",
134 "Origin",
135 "QueryParams",
136 "QueryParamTypes",
137 "Request",
138 "RequestData",
139 "Response",
140 "ResponseContent",
141 "RequestFiles",
142 "DigestAuth",
143 ]
144
[end of httpx/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/httpx/__init__.py b/httpx/__init__.py
--- a/httpx/__init__.py
+++ b/httpx/__init__.py
@@ -27,6 +27,7 @@
ConnectTimeout,
CookieConflict,
DecodingError,
+ HTTPError,
InvalidURL,
NotRedirectResponse,
PoolTimeout,
@@ -97,6 +98,7 @@
"ConnectTimeout",
"CookieConflict",
"DecodingError",
+ "HTTPError",
"InvalidURL",
"NotRedirectResponse",
"PoolTimeout",
| {"golden_diff": "diff --git a/httpx/__init__.py b/httpx/__init__.py\n--- a/httpx/__init__.py\n+++ b/httpx/__init__.py\n@@ -27,6 +27,7 @@\n ConnectTimeout,\n CookieConflict,\n DecodingError,\n+ HTTPError,\n InvalidURL,\n NotRedirectResponse,\n PoolTimeout,\n@@ -97,6 +98,7 @@\n \"ConnectTimeout\",\n \"CookieConflict\",\n \"DecodingError\",\n+ \"HTTPError\",\n \"InvalidURL\",\n \"NotRedirectResponse\",\n \"PoolTimeout\",\n", "issue": "HTTPError should be importable frop the top-level httpx package\nFrom #365:\r\n\r\n> `HTTPError` is not available at the top level like the other exceptions and like it was in requests. This is a somewhat common exception to catch raise_for_status, so having to add another import statement for it is a bit odd if intentional.\r\n\r\nPut differently, `HTTPError` is missing from `httpx/__init__.py`. Adding it would allow us to do:\r\n\r\n```python\r\nimport httpx\r\n\r\ntry:\r\n r = httpx.get(\"https://example.org\")\r\n r.raise_for_status()\r\nexcept httpx.HTTPError:\r\n ...\r\n```\n", "before_files": [{"content": "from .__version__ import __description__, __title__, __version__\nfrom .api import delete, get, head, options, patch, post, put, request\nfrom .client import AsyncClient, Client\nfrom .concurrency.asyncio import AsyncioBackend\nfrom .concurrency.base import (\n BaseBackgroundManager,\n BasePoolSemaphore,\n BaseTCPStream,\n ConcurrencyBackend,\n)\nfrom .config import (\n USER_AGENT,\n CertTypes,\n HTTPVersionConfig,\n HTTPVersionTypes,\n PoolLimits,\n SSLConfig,\n TimeoutConfig,\n TimeoutTypes,\n VerifyTypes,\n)\nfrom .dispatch.base import AsyncDispatcher, Dispatcher\nfrom .dispatch.connection import HTTPConnection\nfrom .dispatch.connection_pool import ConnectionPool\nfrom .dispatch.proxy_http import HTTPProxy, HTTPProxyMode\nfrom .exceptions import (\n ConnectTimeout,\n CookieConflict,\n DecodingError,\n InvalidURL,\n NotRedirectResponse,\n PoolTimeout,\n ProtocolError,\n ProxyError,\n ReadTimeout,\n RedirectBodyUnavailable,\n RedirectLoop,\n ResponseClosed,\n ResponseNotRead,\n StreamConsumed,\n Timeout,\n TooManyRedirects,\n WriteTimeout,\n)\nfrom .middleware.digest_auth import DigestAuth\nfrom .models import (\n URL,\n AsyncRequest,\n AsyncRequestData,\n AsyncResponse,\n AsyncResponseContent,\n AuthTypes,\n Cookies,\n CookieTypes,\n Headers,\n HeaderTypes,\n Origin,\n QueryParams,\n QueryParamTypes,\n Request,\n RequestData,\n RequestFiles,\n Response,\n ResponseContent,\n URLTypes,\n)\nfrom .status_codes import StatusCode, codes\n\n__all__ = [\n \"__description__\",\n \"__title__\",\n \"__version__\",\n \"delete\",\n \"get\",\n \"head\",\n \"options\",\n \"patch\",\n \"post\",\n \"patch\",\n \"put\",\n \"request\",\n \"AsyncClient\",\n \"Client\",\n \"AsyncioBackend\",\n \"USER_AGENT\",\n \"CertTypes\",\n \"PoolLimits\",\n \"SSLConfig\",\n \"TimeoutConfig\",\n \"VerifyTypes\",\n \"HTTPConnection\",\n \"BasePoolSemaphore\",\n \"BaseBackgroundManager\",\n \"ConnectionPool\",\n \"HTTPProxy\",\n \"HTTPProxyMode\",\n \"ConnectTimeout\",\n \"CookieConflict\",\n \"DecodingError\",\n \"InvalidURL\",\n \"NotRedirectResponse\",\n \"PoolTimeout\",\n \"ProtocolError\",\n \"ReadTimeout\",\n \"RedirectBodyUnavailable\",\n \"RedirectLoop\",\n \"ResponseClosed\",\n \"ResponseNotRead\",\n \"StreamConsumed\",\n \"ProxyError\",\n \"Timeout\",\n \"TooManyRedirects\",\n \"WriteTimeout\",\n \"AsyncDispatcher\",\n \"BaseTCPStream\",\n \"ConcurrencyBackend\",\n \"Dispatcher\",\n \"URL\",\n \"URLTypes\",\n \"StatusCode\",\n \"codes\",\n \"TimeoutTypes\",\n 
\"HTTPVersionTypes\",\n \"HTTPVersionConfig\",\n \"AsyncRequest\",\n \"AsyncRequestData\",\n \"AsyncResponse\",\n \"AsyncResponseContent\",\n \"AuthTypes\",\n \"Cookies\",\n \"CookieTypes\",\n \"Headers\",\n \"HeaderTypes\",\n \"Origin\",\n \"QueryParams\",\n \"QueryParamTypes\",\n \"Request\",\n \"RequestData\",\n \"Response\",\n \"ResponseContent\",\n \"RequestFiles\",\n \"DigestAuth\",\n]\n", "path": "httpx/__init__.py"}]} | 1,708 | 129 |
gh_patches_debug_6982 | rasdani/github-patches | git_diff | psf__black-3247 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Suggestion: Mention contextlib.ExitStack in "Use backslashes for with statements"?
**Is this related to a problem? Please describe.**
On [Black's future style doc](https://black.readthedocs.io/en/stable/the_black_code_style/future_style.html#using-backslashes-for-with-statements) it talks about `black` handling a `with` with multiple context managers in a few ways (Python-version-dependent).
**Describe the solution you'd like**
Black should suggest to the user (in documentation only) that if they don't like Black's current or future behavior, that constructing a `contextlib.ExitStack` can help the issue while maintaining 95%-ish of the code readability and has minimal overhead.
As an example (feel free to steal), and using the existing code on that page
```python
with contextlib.ExitStack() as exit_stack:
cm1 = exit_stack.enter_context(make_context_manager(1))
cm2 = exit_stack.enter_context(make_context_manager(2))
cm3 = exit_stack.enter_context(make_context_manager(3))
cm4 = exit_stack.enter_context(make_context_manager(4))
...
```
**Describe alternatives you've considered**
N/A
**Additional context**
:heart: black
</issue>
<code>
[start of docs/conf.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Configuration file for the Sphinx documentation builder.
4 #
5 # This file does only contain a selection of the most common options. For a
6 # full list see the documentation:
7 # http://www.sphinx-doc.org/en/stable/config
8
9 # -- Path setup --------------------------------------------------------------
10
11 # If extensions (or modules to document with autodoc) are in another directory,
12 # add these directories to sys.path here. If the directory is relative to the
13 # documentation root, use os.path.abspath to make it absolute, like shown here.
14 #
15
16 import os
17 import string
18 from pathlib import Path
19
20 from pkg_resources import get_distribution
21
22 CURRENT_DIR = Path(__file__).parent
23
24
25 def make_pypi_svg(version: str) -> None:
26 template: Path = CURRENT_DIR / "_static" / "pypi_template.svg"
27 target: Path = CURRENT_DIR / "_static" / "pypi.svg"
28 with open(str(template), "r", encoding="utf8") as f:
29 svg: str = string.Template(f.read()).substitute(version=version)
30 with open(str(target), "w", encoding="utf8") as f:
31 f.write(svg)
32
33
34 # Necessary so Click doesn't hit an encode error when called by
35 # sphinxcontrib-programoutput on Windows.
36 os.putenv("pythonioencoding", "utf-8")
37
38 # -- Project information -----------------------------------------------------
39
40 project = "Black"
41 copyright = "2018-Present, Łukasz Langa and contributors to Black"
42 author = "Łukasz Langa and contributors to Black"
43
44 # Autopopulate version
45 # The version, including alpha/beta/rc tags, but not commit hash and datestamps
46 release = get_distribution("black").version.split("+")[0]
47 # The short X.Y version.
48 version = release
49 for sp in "abcfr":
50 version = version.split(sp)[0]
51
52 make_pypi_svg(release)
53
54
55 # -- General configuration ---------------------------------------------------
56
57 # If your documentation needs a minimal Sphinx version, state it here.
58 needs_sphinx = "3.0"
59
60 # Add any Sphinx extension module names here, as strings. They can be
61 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
62 # ones.
63 extensions = [
64 "sphinx.ext.autodoc",
65 "sphinx.ext.intersphinx",
66 "sphinx.ext.napoleon",
67 "myst_parser",
68 "sphinxcontrib.programoutput",
69 "sphinx_copybutton",
70 ]
71
72 # If you need extensions of a certain version or higher, list them here.
73 needs_extensions = {"myst_parser": "0.13.7"}
74
75 # Add any paths that contain templates here, relative to this directory.
76 templates_path = ["_templates"]
77
78 # The suffix(es) of source filenames.
79 # You can specify multiple suffix as a list of string:
80 source_suffix = [".rst", ".md"]
81
82 # The master toctree document.
83 master_doc = "index"
84
85 # The language for content autogenerated by Sphinx. Refer to documentation
86 # for a list of supported languages.
87 #
88 # This is also used if you do content translation via gettext catalogs.
89 # Usually you set "language" from the command line for these cases.
90 language = "en"
91
92 # List of patterns, relative to source directory, that match files and
93 # directories to ignore when looking for source files.
94 # This pattern also affects html_static_path and html_extra_path .
95
96 exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
97
98 # The name of the Pygments (syntax highlighting) style to use.
99 pygments_style = "sphinx"
100
101 # We need headers to be linkable to so ask MyST-Parser to autogenerate anchor IDs for
102 # headers up to and including level 3.
103 myst_heading_anchors = 3
104
105 # Prettier support formatting some MyST syntax but not all, so let's disable the
106 # unsupported yet still enabled by default ones.
107 myst_disable_syntax = [
108 "colon_fence",
109 "myst_block_break",
110 "myst_line_comment",
111 "math_block",
112 ]
113
114 # Optional MyST Syntaxes
115 myst_enable_extensions = []
116
117 # -- Options for HTML output -------------------------------------------------
118
119 # The theme to use for HTML and HTML Help pages. See the documentation for
120 # a list of builtin themes.
121 #
122 html_theme = "furo"
123 html_logo = "_static/logo2-readme.png"
124
125 # Add any paths that contain custom static files (such as style sheets) here,
126 # relative to this directory. They are copied after the builtin static files,
127 # so a file named "default.css" will overwrite the builtin "default.css".
128 html_static_path = ["_static"]
129
130 # Custom sidebar templates, must be a dictionary that maps document names
131 # to template names.
132 #
133 # The default sidebars (for documents that don't match any pattern) are
134 # defined by theme itself. Builtin themes are using these templates by
135 # default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
136 # 'searchbox.html']``.
137 #
138 # html_sidebars = {}
139
140
141 # -- Options for HTMLHelp output ---------------------------------------------
142
143 # Output file base name for HTML help builder.
144 htmlhelp_basename = "blackdoc"
145
146
147 # -- Options for LaTeX output ------------------------------------------------
148
149 # Grouping the document tree into LaTeX files. List of tuples
150 # (source start file, target name, title,
151 # author, documentclass [howto, manual, or own class]).
152 latex_documents = [
153 (
154 master_doc,
155 "black.tex",
156 "Documentation for Black",
157 "Łukasz Langa and contributors to Black",
158 "manual",
159 )
160 ]
161
162
163 # -- Options for manual page output ------------------------------------------
164
165 # One entry per manual page. List of tuples
166 # (source start file, name, description, authors, manual section).
167 man_pages = [(master_doc, "black", "Documentation for Black", [author], 1)]
168
169
170 # -- Options for Texinfo output ----------------------------------------------
171
172 # Grouping the document tree into Texinfo files. List of tuples
173 # (source start file, target name, title, author,
174 # dir menu entry, description, category)
175 texinfo_documents = [
176 (
177 master_doc,
178 "Black",
179 "Documentation for Black",
180 author,
181 "Black",
182 "The uncompromising Python code formatter",
183 "Miscellaneous",
184 )
185 ]
186
187
188 # -- Options for Epub output -------------------------------------------------
189
190 # Bibliographic Dublin Core info.
191 epub_title = project
192 epub_author = author
193 epub_publisher = author
194 epub_copyright = copyright
195
196 # The unique identifier of the text. This can be a ISBN number
197 # or the project homepage.
198 #
199 # epub_identifier = ''
200
201 # A unique identification for the text.
202 #
203 # epub_uid = ''
204
205 # A list of files that should not be packed into the epub file.
206 epub_exclude_files = ["search.html"]
207
208
209 # -- Extension configuration -------------------------------------------------
210
211 autodoc_member_order = "bysource"
212
213 # -- Options for intersphinx extension ---------------------------------------
214
215 # Example configuration for intersphinx: refer to the Python standard library.
216 intersphinx_mapping = {"https://docs.python.org/3/": None}
217
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -55,7 +55,7 @@
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
-needs_sphinx = "3.0"
+needs_sphinx = "4.4"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -55,7 +55,7 @@\n # -- General configuration ---------------------------------------------------\n \n # If your documentation needs a minimal Sphinx version, state it here.\n-needs_sphinx = \"3.0\"\n+needs_sphinx = \"4.4\"\n \n # Add any Sphinx extension module names here, as strings. They can be\n # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n", "issue": "Suggestion: Mention contextlib.ExitStack in \"Use backslashes for with statements\"?\n**Is this related to a problem? Please describe.**\r\n\r\nOn [Black's future style doc](https://black.readthedocs.io/en/stable/the_black_code_style/future_style.html#using-backslashes-for-with-statements) it talks about `black` handling a `with` with multiple context managers in a few ways (Python-version-dependent).\r\n\r\n**Describe the solution you'd like**\r\n\r\nBlack should suggest to the user (in documentation only) that if they don't like Black's current or future behavior, that constructing a `contextlib.ExitStack` can help the issue while maintaining 95%-ish of the code readability and has minimal overhead.\r\n\r\nAs an example (feel free to steal), and using the existing code on that page\r\n\r\n```python\r\nwith contextlib.ExitStack() as exit_stack:\r\n cm1 = exit_stack.enter_context(make_context_manager(1))\r\n cm2 = exit_stack.enter_context(make_context_manager(2))\r\n cm3 = exit_stack.enter_context(make_context_manager(3))\r\n cm4 = exit_stack.enter_context(make_context_manager(4))\r\n ...\r\n```\r\n\r\n**Describe alternatives you've considered**\r\n\r\nN/A\r\n\r\n**Additional context**\r\n\r\n:heart: black\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/stable/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. 
If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n\nimport os\nimport string\nfrom pathlib import Path\n\nfrom pkg_resources import get_distribution\n\nCURRENT_DIR = Path(__file__).parent\n\n\ndef make_pypi_svg(version: str) -> None:\n template: Path = CURRENT_DIR / \"_static\" / \"pypi_template.svg\"\n target: Path = CURRENT_DIR / \"_static\" / \"pypi.svg\"\n with open(str(template), \"r\", encoding=\"utf8\") as f:\n svg: str = string.Template(f.read()).substitute(version=version)\n with open(str(target), \"w\", encoding=\"utf8\") as f:\n f.write(svg)\n\n\n# Necessary so Click doesn't hit an encode error when called by\n# sphinxcontrib-programoutput on Windows.\nos.putenv(\"pythonioencoding\", \"utf-8\")\n\n# -- Project information -----------------------------------------------------\n\nproject = \"Black\"\ncopyright = \"2018-Present, \u0141ukasz Langa and contributors to Black\"\nauthor = \"\u0141ukasz Langa and contributors to Black\"\n\n# Autopopulate version\n# The version, including alpha/beta/rc tags, but not commit hash and datestamps\nrelease = get_distribution(\"black\").version.split(\"+\")[0]\n# The short X.Y version.\nversion = release\nfor sp in \"abcfr\":\n version = version.split(sp)[0]\n\nmake_pypi_svg(release)\n\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\nneeds_sphinx = \"3.0\"\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.napoleon\",\n \"myst_parser\",\n \"sphinxcontrib.programoutput\",\n \"sphinx_copybutton\",\n]\n\n# If you need extensions of a certain version or higher, list them here.\nneeds_extensions = {\"myst_parser\": \"0.13.7\"}\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\nsource_suffix = [\".rst\", \".md\"]\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = \"en\"\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\n\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n# We need headers to be linkable to so ask MyST-Parser to autogenerate anchor IDs for\n# headers up to and including level 3.\nmyst_heading_anchors = 3\n\n# Prettier support formatting some MyST syntax but not all, so let's disable the\n# unsupported yet still enabled by default ones.\nmyst_disable_syntax = [\n \"colon_fence\",\n \"myst_block_break\",\n \"myst_line_comment\",\n \"math_block\",\n]\n\n# Optional MyST Syntaxes\nmyst_enable_extensions = []\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. 
See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"furo\"\nhtml_logo = \"_static/logo2-readme.png\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\n# html_sidebars = {}\n\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"blackdoc\"\n\n\n# -- Options for LaTeX output ------------------------------------------------\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (\n master_doc,\n \"black.tex\",\n \"Documentation for Black\",\n \"\u0141ukasz Langa and contributors to Black\",\n \"manual\",\n )\n]\n\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, \"black\", \"Documentation for Black\", [author], 1)]\n\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"Black\",\n \"Documentation for Black\",\n author,\n \"Black\",\n \"The uncompromising Python code formatter\",\n \"Miscellaneous\",\n )\n]\n\n\n# -- Options for Epub output -------------------------------------------------\n\n# Bibliographic Dublin Core info.\nepub_title = project\nepub_author = author\nepub_publisher = author\nepub_copyright = copyright\n\n# The unique identifier of the text. This can be a ISBN number\n# or the project homepage.\n#\n# epub_identifier = ''\n\n# A unique identification for the text.\n#\n# epub_uid = ''\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = [\"search.html\"]\n\n\n# -- Extension configuration -------------------------------------------------\n\nautodoc_member_order = \"bysource\"\n\n# -- Options for intersphinx extension ---------------------------------------\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\"https://docs.python.org/3/\": None}\n", "path": "docs/conf.py"}]} | 2,873 | 111 |
gh_patches_debug_16770 | rasdani/github-patches | git_diff | nautobot__nautobot-5736 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add Family column to Device Types table
### Environment
* Nautobot version (Docker tag too if applicable): 2.2.3
* Python version: 3.11
* Database platform, version: postgres 15
* Middleware(s):
### Steps to Reproduce
1. Go to Device Types list https://demo.nautobot.com/dcim/device-types/
2. Try to add Family in Table Configuration
3. Family is not in the list of available columns
#3559 #5040
https://github.com/nautobot/nautobot/blob/42440ebd9b381534ad89d62420ebea00d703d64e/nautobot/dcim/tables/devicetypes.py#L95
### Expected Behavior
Family column can be selected in Device Types list
### Observed Behavior
Family column is not in the list of available columns
</issue>
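For orientation before the listing, here is a hedged sketch of how an extra linkified column is typically declared with django_tables2. The class and field names below are illustrative assumptions, not Nautobot's actual table code; in Nautobot-style tables the new column name must also be listed in `Meta.fields` (and optionally `default_columns`) to become selectable in the UI.

```python
# Hedged, illustrative sketch (not Nautobot's actual code); a Django project
# with django_tables2 installed is assumed for rendering.
import django_tables2 as tables


class ExampleDeviceTypeTable(tables.Table):
    model = tables.Column(linkify=True, verbose_name="Device Type")
    # The column the issue asks to expose; linkify=True renders each cell as a
    # link to the related object's detail view.
    device_family = tables.Column(linkify=True)
```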
<code>
[start of nautobot/dcim/tables/devicetypes.py]
1 import django_tables2 as tables
2
3 from nautobot.core.tables import (
4 BaseTable,
5 BooleanColumn,
6 ButtonsColumn,
7 LinkedCountColumn,
8 TagColumn,
9 ToggleColumn,
10 )
11 from nautobot.dcim.models import (
12 ConsolePortTemplate,
13 ConsoleServerPortTemplate,
14 DeviceBayTemplate,
15 DeviceFamily,
16 DeviceType,
17 FrontPortTemplate,
18 InterfaceTemplate,
19 Manufacturer,
20 PowerOutletTemplate,
21 PowerPortTemplate,
22 RearPortTemplate,
23 )
24
25 __all__ = (
26 "ConsolePortTemplateTable",
27 "ConsoleServerPortTemplateTable",
28 "DeviceBayTemplateTable",
29 "DeviceFamilyTable",
30 "DeviceTypeTable",
31 "FrontPortTemplateTable",
32 "InterfaceTemplateTable",
33 "ManufacturerTable",
34 "PowerOutletTemplateTable",
35 "PowerPortTemplateTable",
36 "RearPortTemplateTable",
37 )
38
39
40 #
41 # Manufacturers
42 #
43
44
45 class ManufacturerTable(BaseTable):
46 pk = ToggleColumn()
47 name = tables.LinkColumn()
48 device_type_count = LinkedCountColumn(
49 viewname="dcim:devicetype_list", url_params={"manufacturer": "name"}, verbose_name="Device Types"
50 )
51 inventory_item_count = LinkedCountColumn(
52 viewname="dcim:inventoryitem_list", url_params={"manufacturer": "name"}, verbose_name="Inventory Items"
53 )
54 platform_count = LinkedCountColumn(
55 viewname="dcim:platform_list", url_params={"manufacturer": "name"}, verbose_name="Platforms"
56 )
57 actions = ButtonsColumn(Manufacturer)
58
59 class Meta(BaseTable.Meta):
60 model = Manufacturer
61 fields = (
62 "pk",
63 "name",
64 "device_type_count",
65 "inventory_item_count",
66 "platform_count",
67 "description",
68 "actions",
69 )
70
71
72 #
73 # Device Family
74 #
75
76
77 class DeviceFamilyTable(BaseTable):
78 pk = ToggleColumn()
79 name = tables.Column(linkify=True)
80 device_type_count = LinkedCountColumn(
81 viewname="dcim:devicetype_list", url_params={"device_family": "name"}, verbose_name="Device Types"
82 )
83 actions = ButtonsColumn(DeviceFamily)
84 tags = TagColumn(url_name="dcim:devicefamily_list")
85
86 class Meta(BaseTable.Meta):
87 model = DeviceFamily
88 fields = (
89 "pk",
90 "name",
91 "device_type_count",
92 "description",
93 "actions",
94 "tags",
95 )
96
97
98 #
99 # Device types
100 #
101
102
103 class DeviceTypeTable(BaseTable):
104 pk = ToggleColumn()
105 model = tables.Column(linkify=True, verbose_name="Device Type")
106 is_full_depth = BooleanColumn(verbose_name="Full Depth")
107 device_count = LinkedCountColumn(
108 viewname="dcim:device_list",
109 url_params={"device_type": "pk"},
110 verbose_name="Devices",
111 )
112 tags = TagColumn(url_name="dcim:devicetype_list")
113
114 class Meta(BaseTable.Meta):
115 model = DeviceType
116 fields = (
117 "pk",
118 "model",
119 "manufacturer",
120 "part_number",
121 "u_height",
122 "is_full_depth",
123 "subdevice_role",
124 "device_count",
125 "tags",
126 )
127 default_columns = (
128 "pk",
129 "model",
130 "manufacturer",
131 "part_number",
132 "u_height",
133 "is_full_depth",
134 "device_count",
135 )
136
137
138 #
139 # Device type components
140 #
141
142
143 class ComponentTemplateTable(BaseTable):
144 pk = ToggleColumn()
145 name = tables.Column(order_by=("_name",))
146
147
148 class ConsolePortTemplateTable(ComponentTemplateTable):
149 actions = ButtonsColumn(
150 model=ConsolePortTemplate,
151 buttons=("edit", "delete"),
152 return_url_extra="%23tab_consoleports",
153 )
154
155 class Meta(BaseTable.Meta):
156 model = ConsolePortTemplate
157 fields = ("pk", "name", "label", "type", "description", "actions")
158 empty_text = "None"
159
160
161 class ConsoleServerPortTemplateTable(ComponentTemplateTable):
162 actions = ButtonsColumn(
163 model=ConsoleServerPortTemplate,
164 buttons=("edit", "delete"),
165 return_url_extra="%23tab_consoleserverports",
166 )
167
168 class Meta(BaseTable.Meta):
169 model = ConsoleServerPortTemplate
170 fields = ("pk", "name", "label", "type", "description", "actions")
171 empty_text = "None"
172
173
174 class PowerPortTemplateTable(ComponentTemplateTable):
175 actions = ButtonsColumn(
176 model=PowerPortTemplate,
177 buttons=("edit", "delete"),
178 return_url_extra="%23tab_powerports",
179 )
180
181 class Meta(BaseTable.Meta):
182 model = PowerPortTemplate
183 fields = (
184 "pk",
185 "name",
186 "label",
187 "type",
188 "maximum_draw",
189 "allocated_draw",
190 "description",
191 "actions",
192 )
193 empty_text = "None"
194
195
196 class PowerOutletTemplateTable(ComponentTemplateTable):
197 actions = ButtonsColumn(
198 model=PowerOutletTemplate,
199 buttons=("edit", "delete"),
200 return_url_extra="%23tab_poweroutlets",
201 )
202
203 class Meta(BaseTable.Meta):
204 model = PowerOutletTemplate
205 fields = (
206 "pk",
207 "name",
208 "label",
209 "type",
210 "power_port",
211 "feed_leg",
212 "description",
213 "actions",
214 )
215 empty_text = "None"
216
217
218 class InterfaceTemplateTable(ComponentTemplateTable):
219 mgmt_only = BooleanColumn(verbose_name="Management Only")
220 actions = ButtonsColumn(
221 model=InterfaceTemplate,
222 buttons=("edit", "delete"),
223 return_url_extra="%23tab_interfaces",
224 )
225
226 class Meta(BaseTable.Meta):
227 model = InterfaceTemplate
228 fields = ("pk", "name", "label", "mgmt_only", "type", "description", "actions")
229 empty_text = "None"
230
231
232 class FrontPortTemplateTable(ComponentTemplateTable):
233 rear_port_position = tables.Column(verbose_name="Position")
234 actions = ButtonsColumn(
235 model=FrontPortTemplate,
236 buttons=("edit", "delete"),
237 return_url_extra="%23tab_frontports",
238 )
239
240 class Meta(BaseTable.Meta):
241 model = FrontPortTemplate
242 fields = (
243 "pk",
244 "name",
245 "label",
246 "type",
247 "rear_port",
248 "rear_port_position",
249 "description",
250 "actions",
251 )
252 empty_text = "None"
253
254
255 class RearPortTemplateTable(ComponentTemplateTable):
256 actions = ButtonsColumn(
257 model=RearPortTemplate,
258 buttons=("edit", "delete"),
259 return_url_extra="%23tab_rearports",
260 )
261
262 class Meta(BaseTable.Meta):
263 model = RearPortTemplate
264 fields = ("pk", "name", "label", "type", "positions", "description", "actions")
265 empty_text = "None"
266
267
268 class DeviceBayTemplateTable(ComponentTemplateTable):
269 actions = ButtonsColumn(
270 model=DeviceBayTemplate,
271 buttons=("edit", "delete"),
272 return_url_extra="%23tab_devicebays",
273 )
274
275 class Meta(BaseTable.Meta):
276 model = DeviceBayTemplate
277 fields = ("pk", "name", "label", "description", "actions")
278 empty_text = "None"
279
[end of nautobot/dcim/tables/devicetypes.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nautobot/dcim/tables/devicetypes.py b/nautobot/dcim/tables/devicetypes.py
--- a/nautobot/dcim/tables/devicetypes.py
+++ b/nautobot/dcim/tables/devicetypes.py
@@ -103,6 +103,8 @@
class DeviceTypeTable(BaseTable):
pk = ToggleColumn()
model = tables.Column(linkify=True, verbose_name="Device Type")
+ manufacturer = tables.Column(linkify=True)
+ device_family = tables.Column(linkify=True)
is_full_depth = BooleanColumn(verbose_name="Full Depth")
device_count = LinkedCountColumn(
viewname="dcim:device_list",
@@ -117,6 +119,7 @@
"pk",
"model",
"manufacturer",
+ "device_family",
"part_number",
"u_height",
"is_full_depth",
| {"golden_diff": "diff --git a/nautobot/dcim/tables/devicetypes.py b/nautobot/dcim/tables/devicetypes.py\n--- a/nautobot/dcim/tables/devicetypes.py\n+++ b/nautobot/dcim/tables/devicetypes.py\n@@ -103,6 +103,8 @@\n class DeviceTypeTable(BaseTable):\n pk = ToggleColumn()\n model = tables.Column(linkify=True, verbose_name=\"Device Type\")\n+ manufacturer = tables.Column(linkify=True)\n+ device_family = tables.Column(linkify=True)\n is_full_depth = BooleanColumn(verbose_name=\"Full Depth\")\n device_count = LinkedCountColumn(\n viewname=\"dcim:device_list\",\n@@ -117,6 +119,7 @@\n \"pk\",\n \"model\",\n \"manufacturer\",\n+ \"device_family\",\n \"part_number\",\n \"u_height\",\n \"is_full_depth\",\n", "issue": "Add Family column to Device Types table\n### Environment\r\n* Nautobot version (Docker tag too if applicable): 2.2.3\r\n* Python version: 3.11\r\n* Database platform, version: postgres 15\r\n* Middleware(s):\r\n\r\n### Steps to Reproduce\r\n1. Go to Device Types list https://demo.nautobot.com/dcim/device-types/\r\n2. Try to add Family in Table Configuration\r\n3. Family is not in the list of available columns\r\n\r\n#3559 #5040 \r\nhttps://github.com/nautobot/nautobot/blob/42440ebd9b381534ad89d62420ebea00d703d64e/nautobot/dcim/tables/devicetypes.py#L95\r\n\r\n### Expected Behavior\r\nFamily column can be selected in Device Types list\r\n\r\n### Observed Behavior\r\nFamily column is not in the list of available columns\nAdd Family column to Device Types table\n### Environment\r\n* Nautobot version (Docker tag too if applicable): 2.2.3\r\n* Python version: 3.11\r\n* Database platform, version: postgres 15\r\n* Middleware(s):\r\n\r\n### Steps to Reproduce\r\n1. Go to Device Types list https://demo.nautobot.com/dcim/device-types/\r\n2. Try to add Family in Table Configuration\r\n3. 
Family is not in the list of available columns\r\n\r\n#3559 #5040 \r\nhttps://github.com/nautobot/nautobot/blob/42440ebd9b381534ad89d62420ebea00d703d64e/nautobot/dcim/tables/devicetypes.py#L95\r\n\r\n### Expected Behavior\r\nFamily column can be selected in Device Types list\r\n\r\n### Observed Behavior\r\nFamily column is not in the list of available columns\n", "before_files": [{"content": "import django_tables2 as tables\n\nfrom nautobot.core.tables import (\n BaseTable,\n BooleanColumn,\n ButtonsColumn,\n LinkedCountColumn,\n TagColumn,\n ToggleColumn,\n)\nfrom nautobot.dcim.models import (\n ConsolePortTemplate,\n ConsoleServerPortTemplate,\n DeviceBayTemplate,\n DeviceFamily,\n DeviceType,\n FrontPortTemplate,\n InterfaceTemplate,\n Manufacturer,\n PowerOutletTemplate,\n PowerPortTemplate,\n RearPortTemplate,\n)\n\n__all__ = (\n \"ConsolePortTemplateTable\",\n \"ConsoleServerPortTemplateTable\",\n \"DeviceBayTemplateTable\",\n \"DeviceFamilyTable\",\n \"DeviceTypeTable\",\n \"FrontPortTemplateTable\",\n \"InterfaceTemplateTable\",\n \"ManufacturerTable\",\n \"PowerOutletTemplateTable\",\n \"PowerPortTemplateTable\",\n \"RearPortTemplateTable\",\n)\n\n\n#\n# Manufacturers\n#\n\n\nclass ManufacturerTable(BaseTable):\n pk = ToggleColumn()\n name = tables.LinkColumn()\n device_type_count = LinkedCountColumn(\n viewname=\"dcim:devicetype_list\", url_params={\"manufacturer\": \"name\"}, verbose_name=\"Device Types\"\n )\n inventory_item_count = LinkedCountColumn(\n viewname=\"dcim:inventoryitem_list\", url_params={\"manufacturer\": \"name\"}, verbose_name=\"Inventory Items\"\n )\n platform_count = LinkedCountColumn(\n viewname=\"dcim:platform_list\", url_params={\"manufacturer\": \"name\"}, verbose_name=\"Platforms\"\n )\n actions = ButtonsColumn(Manufacturer)\n\n class Meta(BaseTable.Meta):\n model = Manufacturer\n fields = (\n \"pk\",\n \"name\",\n \"device_type_count\",\n \"inventory_item_count\",\n \"platform_count\",\n \"description\",\n \"actions\",\n )\n\n\n#\n# Device Family\n#\n\n\nclass DeviceFamilyTable(BaseTable):\n pk = ToggleColumn()\n name = tables.Column(linkify=True)\n device_type_count = LinkedCountColumn(\n viewname=\"dcim:devicetype_list\", url_params={\"device_family\": \"name\"}, verbose_name=\"Device Types\"\n )\n actions = ButtonsColumn(DeviceFamily)\n tags = TagColumn(url_name=\"dcim:devicefamily_list\")\n\n class Meta(BaseTable.Meta):\n model = DeviceFamily\n fields = (\n \"pk\",\n \"name\",\n \"device_type_count\",\n \"description\",\n \"actions\",\n \"tags\",\n )\n\n\n#\n# Device types\n#\n\n\nclass DeviceTypeTable(BaseTable):\n pk = ToggleColumn()\n model = tables.Column(linkify=True, verbose_name=\"Device Type\")\n is_full_depth = BooleanColumn(verbose_name=\"Full Depth\")\n device_count = LinkedCountColumn(\n viewname=\"dcim:device_list\",\n url_params={\"device_type\": \"pk\"},\n verbose_name=\"Devices\",\n )\n tags = TagColumn(url_name=\"dcim:devicetype_list\")\n\n class Meta(BaseTable.Meta):\n model = DeviceType\n fields = (\n \"pk\",\n \"model\",\n \"manufacturer\",\n \"part_number\",\n \"u_height\",\n \"is_full_depth\",\n \"subdevice_role\",\n \"device_count\",\n \"tags\",\n )\n default_columns = (\n \"pk\",\n \"model\",\n \"manufacturer\",\n \"part_number\",\n \"u_height\",\n \"is_full_depth\",\n \"device_count\",\n )\n\n\n#\n# Device type components\n#\n\n\nclass ComponentTemplateTable(BaseTable):\n pk = ToggleColumn()\n name = tables.Column(order_by=(\"_name\",))\n\n\nclass ConsolePortTemplateTable(ComponentTemplateTable):\n actions = ButtonsColumn(\n 
model=ConsolePortTemplate,\n buttons=(\"edit\", \"delete\"),\n return_url_extra=\"%23tab_consoleports\",\n )\n\n class Meta(BaseTable.Meta):\n model = ConsolePortTemplate\n fields = (\"pk\", \"name\", \"label\", \"type\", \"description\", \"actions\")\n empty_text = \"None\"\n\n\nclass ConsoleServerPortTemplateTable(ComponentTemplateTable):\n actions = ButtonsColumn(\n model=ConsoleServerPortTemplate,\n buttons=(\"edit\", \"delete\"),\n return_url_extra=\"%23tab_consoleserverports\",\n )\n\n class Meta(BaseTable.Meta):\n model = ConsoleServerPortTemplate\n fields = (\"pk\", \"name\", \"label\", \"type\", \"description\", \"actions\")\n empty_text = \"None\"\n\n\nclass PowerPortTemplateTable(ComponentTemplateTable):\n actions = ButtonsColumn(\n model=PowerPortTemplate,\n buttons=(\"edit\", \"delete\"),\n return_url_extra=\"%23tab_powerports\",\n )\n\n class Meta(BaseTable.Meta):\n model = PowerPortTemplate\n fields = (\n \"pk\",\n \"name\",\n \"label\",\n \"type\",\n \"maximum_draw\",\n \"allocated_draw\",\n \"description\",\n \"actions\",\n )\n empty_text = \"None\"\n\n\nclass PowerOutletTemplateTable(ComponentTemplateTable):\n actions = ButtonsColumn(\n model=PowerOutletTemplate,\n buttons=(\"edit\", \"delete\"),\n return_url_extra=\"%23tab_poweroutlets\",\n )\n\n class Meta(BaseTable.Meta):\n model = PowerOutletTemplate\n fields = (\n \"pk\",\n \"name\",\n \"label\",\n \"type\",\n \"power_port\",\n \"feed_leg\",\n \"description\",\n \"actions\",\n )\n empty_text = \"None\"\n\n\nclass InterfaceTemplateTable(ComponentTemplateTable):\n mgmt_only = BooleanColumn(verbose_name=\"Management Only\")\n actions = ButtonsColumn(\n model=InterfaceTemplate,\n buttons=(\"edit\", \"delete\"),\n return_url_extra=\"%23tab_interfaces\",\n )\n\n class Meta(BaseTable.Meta):\n model = InterfaceTemplate\n fields = (\"pk\", \"name\", \"label\", \"mgmt_only\", \"type\", \"description\", \"actions\")\n empty_text = \"None\"\n\n\nclass FrontPortTemplateTable(ComponentTemplateTable):\n rear_port_position = tables.Column(verbose_name=\"Position\")\n actions = ButtonsColumn(\n model=FrontPortTemplate,\n buttons=(\"edit\", \"delete\"),\n return_url_extra=\"%23tab_frontports\",\n )\n\n class Meta(BaseTable.Meta):\n model = FrontPortTemplate\n fields = (\n \"pk\",\n \"name\",\n \"label\",\n \"type\",\n \"rear_port\",\n \"rear_port_position\",\n \"description\",\n \"actions\",\n )\n empty_text = \"None\"\n\n\nclass RearPortTemplateTable(ComponentTemplateTable):\n actions = ButtonsColumn(\n model=RearPortTemplate,\n buttons=(\"edit\", \"delete\"),\n return_url_extra=\"%23tab_rearports\",\n )\n\n class Meta(BaseTable.Meta):\n model = RearPortTemplate\n fields = (\"pk\", \"name\", \"label\", \"type\", \"positions\", \"description\", \"actions\")\n empty_text = \"None\"\n\n\nclass DeviceBayTemplateTable(ComponentTemplateTable):\n actions = ButtonsColumn(\n model=DeviceBayTemplate,\n buttons=(\"edit\", \"delete\"),\n return_url_extra=\"%23tab_devicebays\",\n )\n\n class Meta(BaseTable.Meta):\n model = DeviceBayTemplate\n fields = (\"pk\", \"name\", \"label\", \"description\", \"actions\")\n empty_text = \"None\"\n", "path": "nautobot/dcim/tables/devicetypes.py"}]} | 3,249 | 198 |
gh_patches_debug_10189 | rasdani/github-patches | git_diff | beetbox__beets-1980 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
hook: Crash when using non-ASCII paths in command template
Using the config
```
hook:
hooks:
- event: album_imported
command: beet convert -ay path:{album.path}
```
and this command
```
beet import -a -L path:/path/to/lib/Ария
```
I get a UnicodeDecodeError from `hook.py` line 50.
</issue>
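To illustrate the failure mode described above, here is a hedged sketch under Python 2 string semantics (which beets targeted at the time). The variable names are illustrative, not beets' actual code; the point is that keeping the template in text space before formatting avoids implicit ASCII conversions.

```python
# Hedged sketch assuming Python 2 string semantics; names are illustrative.
# Mixing byte strings and unicode values in str.format() can trigger implicit
# ASCII conversions, so decode the template to text before formatting.
command = 'beet convert -ay path:{path}'    # byte-string template
path = u'/path/to/lib/Ария'                 # non-ASCII value from the library

formatted = command.decode('utf-8').format(path=path)  # text in, text out
print(formatted)
```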
<code>
[start of beetsplug/hook.py]
1 # This file is part of beets.
2 # Copyright 2015, Adrian Sampson.
3 #
4 # Permission is hereby granted, free of charge, to any person obtaining
5 # a copy of this software and associated documentation files (the
6 # "Software"), to deal in the Software without restriction, including
7 # without limitation the rights to use, copy, modify, merge, publish,
8 # distribute, sublicense, and/or sell copies of the Software, and to
9 # permit persons to whom the Software is furnished to do so, subject to
10 # the following conditions:
11 #
12 # The above copyright notice and this permission notice shall be
13 # included in all copies or substantial portions of the Software.
14
15 """Allows custom commands to be run when an event is emitted by beets"""
16 from __future__ import division, absolute_import, print_function
17
18 import shlex
19 import subprocess
20
21 from beets.plugins import BeetsPlugin
22 from beets.ui import _arg_encoding
23
24
25 class HookPlugin(BeetsPlugin):
26 """Allows custom commands to be run when an event is emitted by beets"""
27 def __init__(self):
28 super(HookPlugin, self).__init__()
29
30 self.config.add({
31 'hooks': []
32 })
33
34 hooks = self.config['hooks'].get(list)
35
36 for hook_index in range(len(hooks)):
37 hook = self.config['hooks'][hook_index]
38
39 hook_event = hook['event'].get()
40 hook_command = hook['command'].get()
41
42 self.create_and_register_hook(hook_event, hook_command)
43
44 def create_and_register_hook(self, event, command):
45 def hook_function(**kwargs):
46 if command is None or len(command) == 0:
47 self._log.error('invalid command "{0}"', command)
48 return
49
50 formatted_command = command.format(event=event, **kwargs)
51 encoded_command = formatted_command.decode(_arg_encoding())
52 command_pieces = shlex.split(encoded_command)
53
54 self._log.debug('Running command "{0}" for event {1}',
55 encoded_command, event)
56
57 try:
58 subprocess.Popen(command_pieces).wait()
59 except OSError as exc:
60 self._log.error('hook for {0} failed: {1}', event, exc)
61
62 self.register_listener(event, hook_function)
63
[end of beetsplug/hook.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/beetsplug/hook.py b/beetsplug/hook.py
--- a/beetsplug/hook.py
+++ b/beetsplug/hook.py
@@ -47,7 +47,9 @@
self._log.error('invalid command "{0}"', command)
return
- formatted_command = command.format(event=event, **kwargs)
+ unicode_command = command.decode('utf-8')
+ formatted_command = unicode_command.format(event=event,
+ **kwargs)
encoded_command = formatted_command.decode(_arg_encoding())
command_pieces = shlex.split(encoded_command)
| {"golden_diff": "diff --git a/beetsplug/hook.py b/beetsplug/hook.py\n--- a/beetsplug/hook.py\n+++ b/beetsplug/hook.py\n@@ -47,7 +47,9 @@\n self._log.error('invalid command \"{0}\"', command)\n return\n \n- formatted_command = command.format(event=event, **kwargs)\n+ unicode_command = command.decode('utf-8')\n+ formatted_command = unicode_command.format(event=event,\n+ **kwargs)\n encoded_command = formatted_command.decode(_arg_encoding())\n command_pieces = shlex.split(encoded_command)\n", "issue": "hook: Crash when using non-ASCII paths in command template\nUsing the config\n\n```\nhook:\n hooks:\n - event: album_imported\n command: beet convert -ay path:{album.path}\n```\n\nand this command\n\n```\nbeet import -a -L path:/path/to/lib/\u0410\u0440\u0438\u044f\n```\n\nI get an UnicodeDecodeError from `hook.py` line 50.\n\n", "before_files": [{"content": "# This file is part of beets.\n# Copyright 2015, Adrian Sampson.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Allows custom commands to be run when an event is emitted by beets\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nimport shlex\nimport subprocess\n\nfrom beets.plugins import BeetsPlugin\nfrom beets.ui import _arg_encoding\n\n\nclass HookPlugin(BeetsPlugin):\n \"\"\"Allows custom commands to be run when an event is emitted by beets\"\"\"\n def __init__(self):\n super(HookPlugin, self).__init__()\n\n self.config.add({\n 'hooks': []\n })\n\n hooks = self.config['hooks'].get(list)\n\n for hook_index in range(len(hooks)):\n hook = self.config['hooks'][hook_index]\n\n hook_event = hook['event'].get()\n hook_command = hook['command'].get()\n\n self.create_and_register_hook(hook_event, hook_command)\n\n def create_and_register_hook(self, event, command):\n def hook_function(**kwargs):\n if command is None or len(command) == 0:\n self._log.error('invalid command \"{0}\"', command)\n return\n\n formatted_command = command.format(event=event, **kwargs)\n encoded_command = formatted_command.decode(_arg_encoding())\n command_pieces = shlex.split(encoded_command)\n\n self._log.debug('Running command \"{0}\" for event {1}',\n encoded_command, event)\n\n try:\n subprocess.Popen(command_pieces).wait()\n except OSError as exc:\n self._log.error('hook for {0} failed: {1}', event, exc)\n\n self.register_listener(event, hook_function)\n", "path": "beetsplug/hook.py"}]} | 1,220 | 127 |
gh_patches_debug_19350 | rasdani/github-patches | git_diff | instadeepai__Mava-606 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[TEST] Jax Model Updating
### What do you want to test?
Jax model updating components
### Outline of test structure
* Unit tests
* Test components and hooks
### Definition of done
Passing checks, cover all hooks, edge cases considered
### Mandatory checklist before making a PR
* [ ] The success criteria laid down in “Definition of done” are met.
* [ ] Test code is documented - docstrings for methods and classes, static types for arguments.
* [ ] Documentation is updated - README, CONTRIBUTING, or other documentation.
</issue>
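Since the issue asks for unit tests over the component hooks, a hedged pytest-style sketch of one such test is shown below. It exercises only behaviour visible in the listing that follows (the epoch-update hook writing `num_epochs`, `num_minibatches`, and `epoch_update_fn` onto the trainer store); the import path and the fake trainer object are assumptions.

```python
# Hedged pytest-style sketch; the import path and fake trainer are assumptions.
from types import SimpleNamespace

from mava.components.jax.training.model_updating import (
    MAPGEpochUpdate,
    MAPGEpochUpdateConfig,
)


def test_epoch_update_populates_trainer_store():
    trainer = SimpleNamespace(store=SimpleNamespace())
    component = MAPGEpochUpdate(
        config=MAPGEpochUpdateConfig(num_epochs=2, num_minibatches=4)
    )

    component.on_training_utility_fns(trainer)

    assert trainer.store.num_epochs == 2
    assert trainer.store.num_minibatches == 4
    assert callable(trainer.store.epoch_update_fn)
```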
<code>
[start of mava/components/jax/training/model_updating.py]
1 # python3
2 # Copyright 2021 InstaDeep Ltd. All rights reserved.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 """Trainer components for system updating."""
17
18 import abc
19 from dataclasses import dataclass
20 from typing import Any, Callable, Dict, Optional, Tuple
21
22 import jax
23 import jax.numpy as jnp
24 import optax
25 from acme.jax import networks as networks_lib
26 from jax.random import KeyArray
27 from optax._src import base as optax_base
28
29 from mava.components.jax.training import Batch, Utility
30 from mava.core_jax import SystemTrainer
31
32
33 class MinibatchUpdate(Utility):
34 @abc.abstractmethod
35 def __init__(self, config: Any) -> None:
36 """_summary_
37
38 Args:
39 config : _description_.
40 """
41 self.config = config
42
43 @staticmethod
44 def name() -> str:
45 """_summary_
46
47 Returns:
48 _description_
49 """
50 return "minibatch_update"
51
52
53 @dataclass
54 class MAPGMinibatchUpdateConfig:
55 learning_rate: float = 1e-3
56 adam_epsilon: float = 1e-5
57 max_gradient_norm: float = 0.5
58 optimizer: Optional[optax_base.GradientTransformation] = (None,)
59
60
61 class MAPGMinibatchUpdate(MinibatchUpdate):
62 def __init__(
63 self,
64 config: MAPGMinibatchUpdateConfig = MAPGMinibatchUpdateConfig(),
65 ):
66 """_summary_
67
68 Args:
69 config : _description_.
70 """
71 self.config = config
72
73 def on_training_utility_fns(self, trainer: SystemTrainer) -> None:
74 """_summary_"""
75
76 if not self.config.optimizer:
77 trainer.store.optimizer = optax.chain(
78 optax.clip_by_global_norm(self.config.max_gradient_norm),
79 optax.scale_by_adam(eps=self.config.adam_epsilon),
80 optax.scale(-self.config.learning_rate),
81 )
82 else:
83 trainer.store.optimizer = self.config.optimizer
84
85 # Initialize optimizers.
86 trainer.store.opt_states = {}
87 for net_key in trainer.store.networks["networks"].keys():
88 trainer.store.opt_states[net_key] = trainer.store.optimizer.init(
89 trainer.store.networks["networks"][net_key].params
90 ) # pytype: disable=attribute-error
91
92 def model_update_minibatch(
93 carry: Tuple[networks_lib.Params, optax.OptState], minibatch: Batch
94 ) -> Tuple[Tuple[Any, optax.OptState], Dict[str, Any]]:
95 """Performs model update for a single minibatch."""
96 params, opt_states = carry
97
98 # Normalize advantages at the minibatch level before using them.
99 advantages = jax.tree_map(
100 lambda x: (x - jnp.mean(x, axis=0)) / (jnp.std(x, axis=0) + 1e-8),
101 minibatch.advantages,
102 )
103
104 # Calculate the gradients and agent metrics.
105 gradients, agent_metrics = trainer.store.grad_fn(
106 params,
107 minibatch.observations,
108 minibatch.actions,
109 minibatch.behavior_log_probs,
110 minibatch.target_values,
111 advantages,
112 minibatch.behavior_values,
113 )
114
115 # Update the networks and optimizors.
116 metrics = {}
117 for agent_key in trainer.store.trainer_agents:
118 agent_net_key = trainer.store.trainer_agent_net_keys[agent_key]
119 # Apply updates
120 # TODO (dries): Use one optimizer per network type here and not
121 # just one.
122 updates, opt_states[agent_net_key] = trainer.store.optimizer.update(
123 gradients[agent_key], opt_states[agent_net_key]
124 )
125 params[agent_net_key] = optax.apply_updates(
126 params[agent_net_key], updates
127 )
128
129 agent_metrics[agent_key]["norm_grad"] = optax.global_norm(
130 gradients[agent_key]
131 )
132 agent_metrics[agent_key]["norm_updates"] = optax.global_norm(updates)
133 metrics[agent_key] = agent_metrics
134 return (params, opt_states), metrics
135
136 trainer.store.minibatch_update_fn = model_update_minibatch
137
138 @staticmethod
139 def config_class() -> Optional[Callable]:
140 """Config class used for component.
141
142 Returns:
143 config class/dataclass for component.
144 """
145 return MAPGMinibatchUpdateConfig
146
147
148 class EpochUpdate(Utility):
149 @abc.abstractmethod
150 def __init__(self, config: Any) -> None:
151 """_summary_
152
153 Args:
154 config : _description_.
155 """
156 self.config = config
157
158 @staticmethod
159 def name() -> str:
160 """_summary_
161
162 Returns:
163 _description_
164 """
165 return "epoch_update"
166
167
168 @dataclass
169 class MAPGEpochUpdateConfig:
170 num_epochs: int = 4
171 num_minibatches: int = 1
172
173
174 class MAPGEpochUpdate(EpochUpdate):
175 def __init__(
176 self,
177 config: MAPGEpochUpdateConfig = MAPGEpochUpdateConfig(),
178 ):
179 """_summary_
180
181 Args:
182 config : _description_.
183 """
184 self.config = config
185
186 def on_training_utility_fns(self, trainer: SystemTrainer) -> None:
187 """_summary_"""
188 trainer.store.num_epochs = self.config.num_epochs
189 trainer.store.num_minibatches = self.config.num_minibatches
190
191 def model_update_epoch(
192 carry: Tuple[KeyArray, Any, optax.OptState, Batch],
193 unused_t: Tuple[()],
194 ) -> Tuple[
195 Tuple[KeyArray, Any, optax.OptState, Batch],
196 Dict[str, jnp.ndarray],
197 ]:
198 """Performs model updates based on one epoch of data."""
199 key, params, opt_states, batch = carry
200
201 new_key, subkey = jax.random.split(key)
202
203 # TODO (dries): This assert is ugly. Is there a better way to do this check?
204 # Maybe using a tree map of some sort?
205 # shapes = jax.tree_map(
206 # lambda x: x.shape[0]==trainer.store.full_batch_size, batch
207 # )
208 # assert ...
209 assert (
210 list(batch.observations.values())[0].observation.shape[0]
211 == trainer.store.full_batch_size
212 )
213
214 permutation = jax.random.permutation(subkey, trainer.store.full_batch_size)
215
216 shuffled_batch = jax.tree_map(
217 lambda x: jnp.take(x, permutation, axis=0), batch
218 )
219 minibatches = jax.tree_map(
220 lambda x: jnp.reshape(
221 x, [self.config.num_minibatches, -1] + list(x.shape[1:])
222 ),
223 shuffled_batch,
224 )
225
226 (new_params, new_opt_states), metrics = jax.lax.scan(
227 trainer.store.minibatch_update_fn,
228 (params, opt_states),
229 minibatches,
230 length=self.config.num_minibatches,
231 )
232
233 return (new_key, new_params, new_opt_states, batch), metrics
234
235 trainer.store.epoch_update_fn = model_update_epoch
236
237 @staticmethod
238 def config_class() -> Optional[Callable]:
239 """Config class used for component.
240
241 Returns:
242 config class/dataclass for component.
243 """
244 return MAPGEpochUpdateConfig
245
[end of mava/components/jax/training/model_updating.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mava/components/jax/training/model_updating.py b/mava/components/jax/training/model_updating.py
--- a/mava/components/jax/training/model_updating.py
+++ b/mava/components/jax/training/model_updating.py
@@ -113,7 +113,6 @@
)
# Update the networks and optimizors.
- metrics = {}
for agent_key in trainer.store.trainer_agents:
agent_net_key = trainer.store.trainer_agent_net_keys[agent_key]
# Apply updates
@@ -130,8 +129,7 @@
gradients[agent_key]
)
agent_metrics[agent_key]["norm_updates"] = optax.global_norm(updates)
- metrics[agent_key] = agent_metrics
- return (params, opt_states), metrics
+ return (params, opt_states), agent_metrics
trainer.store.minibatch_update_fn = model_update_minibatch
| {"golden_diff": "diff --git a/mava/components/jax/training/model_updating.py b/mava/components/jax/training/model_updating.py\n--- a/mava/components/jax/training/model_updating.py\n+++ b/mava/components/jax/training/model_updating.py\n@@ -113,7 +113,6 @@\n )\n \n # Update the networks and optimizors.\n- metrics = {}\n for agent_key in trainer.store.trainer_agents:\n agent_net_key = trainer.store.trainer_agent_net_keys[agent_key]\n # Apply updates\n@@ -130,8 +129,7 @@\n gradients[agent_key]\n )\n agent_metrics[agent_key][\"norm_updates\"] = optax.global_norm(updates)\n- metrics[agent_key] = agent_metrics\n- return (params, opt_states), metrics\n+ return (params, opt_states), agent_metrics\n \n trainer.store.minibatch_update_fn = model_update_minibatch\n", "issue": "[TEST] Jax Model Updating\n### What do you want to test?\r\nJax model updating components\r\n\r\n### Outline of test structure\r\n* Unit tests\r\n* Test components and hooks\r\n\r\n### Definition of done\r\nPassing checks, cover all hooks, edge cases considered\r\n\r\n### Mandatory checklist before making a PR\r\n* [ ] The success criteria laid down in \u201cDefinition of done\u201d are met.\r\n* [ ] Test code is documented - docstrings for methods and classes, static types for arguments.\r\n* [ ] Documentation is updated - README, CONTRIBUTING, or other documentation.\n", "before_files": [{"content": "# python3\n# Copyright 2021 InstaDeep Ltd. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Trainer components for system updating.\"\"\"\n\nimport abc\nfrom dataclasses import dataclass\nfrom typing import Any, Callable, Dict, Optional, Tuple\n\nimport jax\nimport jax.numpy as jnp\nimport optax\nfrom acme.jax import networks as networks_lib\nfrom jax.random import KeyArray\nfrom optax._src import base as optax_base\n\nfrom mava.components.jax.training import Batch, Utility\nfrom mava.core_jax import SystemTrainer\n\n\nclass MinibatchUpdate(Utility):\n @abc.abstractmethod\n def __init__(self, config: Any) -> None:\n \"\"\"_summary_\n\n Args:\n config : _description_.\n \"\"\"\n self.config = config\n\n @staticmethod\n def name() -> str:\n \"\"\"_summary_\n\n Returns:\n _description_\n \"\"\"\n return \"minibatch_update\"\n\n\n@dataclass\nclass MAPGMinibatchUpdateConfig:\n learning_rate: float = 1e-3\n adam_epsilon: float = 1e-5\n max_gradient_norm: float = 0.5\n optimizer: Optional[optax_base.GradientTransformation] = (None,)\n\n\nclass MAPGMinibatchUpdate(MinibatchUpdate):\n def __init__(\n self,\n config: MAPGMinibatchUpdateConfig = MAPGMinibatchUpdateConfig(),\n ):\n \"\"\"_summary_\n\n Args:\n config : _description_.\n \"\"\"\n self.config = config\n\n def on_training_utility_fns(self, trainer: SystemTrainer) -> None:\n \"\"\"_summary_\"\"\"\n\n if not self.config.optimizer:\n trainer.store.optimizer = optax.chain(\n optax.clip_by_global_norm(self.config.max_gradient_norm),\n optax.scale_by_adam(eps=self.config.adam_epsilon),\n optax.scale(-self.config.learning_rate),\n )\n else:\n 
trainer.store.optimizer = self.config.optimizer\n\n # Initialize optimizers.\n trainer.store.opt_states = {}\n for net_key in trainer.store.networks[\"networks\"].keys():\n trainer.store.opt_states[net_key] = trainer.store.optimizer.init(\n trainer.store.networks[\"networks\"][net_key].params\n ) # pytype: disable=attribute-error\n\n def model_update_minibatch(\n carry: Tuple[networks_lib.Params, optax.OptState], minibatch: Batch\n ) -> Tuple[Tuple[Any, optax.OptState], Dict[str, Any]]:\n \"\"\"Performs model update for a single minibatch.\"\"\"\n params, opt_states = carry\n\n # Normalize advantages at the minibatch level before using them.\n advantages = jax.tree_map(\n lambda x: (x - jnp.mean(x, axis=0)) / (jnp.std(x, axis=0) + 1e-8),\n minibatch.advantages,\n )\n\n # Calculate the gradients and agent metrics.\n gradients, agent_metrics = trainer.store.grad_fn(\n params,\n minibatch.observations,\n minibatch.actions,\n minibatch.behavior_log_probs,\n minibatch.target_values,\n advantages,\n minibatch.behavior_values,\n )\n\n # Update the networks and optimizors.\n metrics = {}\n for agent_key in trainer.store.trainer_agents:\n agent_net_key = trainer.store.trainer_agent_net_keys[agent_key]\n # Apply updates\n # TODO (dries): Use one optimizer per network type here and not\n # just one.\n updates, opt_states[agent_net_key] = trainer.store.optimizer.update(\n gradients[agent_key], opt_states[agent_net_key]\n )\n params[agent_net_key] = optax.apply_updates(\n params[agent_net_key], updates\n )\n\n agent_metrics[agent_key][\"norm_grad\"] = optax.global_norm(\n gradients[agent_key]\n )\n agent_metrics[agent_key][\"norm_updates\"] = optax.global_norm(updates)\n metrics[agent_key] = agent_metrics\n return (params, opt_states), metrics\n\n trainer.store.minibatch_update_fn = model_update_minibatch\n\n @staticmethod\n def config_class() -> Optional[Callable]:\n \"\"\"Config class used for component.\n\n Returns:\n config class/dataclass for component.\n \"\"\"\n return MAPGMinibatchUpdateConfig\n\n\nclass EpochUpdate(Utility):\n @abc.abstractmethod\n def __init__(self, config: Any) -> None:\n \"\"\"_summary_\n\n Args:\n config : _description_.\n \"\"\"\n self.config = config\n\n @staticmethod\n def name() -> str:\n \"\"\"_summary_\n\n Returns:\n _description_\n \"\"\"\n return \"epoch_update\"\n\n\n@dataclass\nclass MAPGEpochUpdateConfig:\n num_epochs: int = 4\n num_minibatches: int = 1\n\n\nclass MAPGEpochUpdate(EpochUpdate):\n def __init__(\n self,\n config: MAPGEpochUpdateConfig = MAPGEpochUpdateConfig(),\n ):\n \"\"\"_summary_\n\n Args:\n config : _description_.\n \"\"\"\n self.config = config\n\n def on_training_utility_fns(self, trainer: SystemTrainer) -> None:\n \"\"\"_summary_\"\"\"\n trainer.store.num_epochs = self.config.num_epochs\n trainer.store.num_minibatches = self.config.num_minibatches\n\n def model_update_epoch(\n carry: Tuple[KeyArray, Any, optax.OptState, Batch],\n unused_t: Tuple[()],\n ) -> Tuple[\n Tuple[KeyArray, Any, optax.OptState, Batch],\n Dict[str, jnp.ndarray],\n ]:\n \"\"\"Performs model updates based on one epoch of data.\"\"\"\n key, params, opt_states, batch = carry\n\n new_key, subkey = jax.random.split(key)\n\n # TODO (dries): This assert is ugly. 
Is there a better way to do this check?\n # Maybe using a tree map of some sort?\n # shapes = jax.tree_map(\n # lambda x: x.shape[0]==trainer.store.full_batch_size, batch\n # )\n # assert ...\n assert (\n list(batch.observations.values())[0].observation.shape[0]\n == trainer.store.full_batch_size\n )\n\n permutation = jax.random.permutation(subkey, trainer.store.full_batch_size)\n\n shuffled_batch = jax.tree_map(\n lambda x: jnp.take(x, permutation, axis=0), batch\n )\n minibatches = jax.tree_map(\n lambda x: jnp.reshape(\n x, [self.config.num_minibatches, -1] + list(x.shape[1:])\n ),\n shuffled_batch,\n )\n\n (new_params, new_opt_states), metrics = jax.lax.scan(\n trainer.store.minibatch_update_fn,\n (params, opt_states),\n minibatches,\n length=self.config.num_minibatches,\n )\n\n return (new_key, new_params, new_opt_states, batch), metrics\n\n trainer.store.epoch_update_fn = model_update_epoch\n\n @staticmethod\n def config_class() -> Optional[Callable]:\n \"\"\"Config class used for component.\n\n Returns:\n config class/dataclass for component.\n \"\"\"\n return MAPGEpochUpdateConfig\n", "path": "mava/components/jax/training/model_updating.py"}]} | 3,019 | 209 |
gh_patches_debug_2863 | rasdani/github-patches | git_diff | Kinto__kinto-1786 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove colander deprecations
```
/home/mathieu/Code/Mozilla/kinto/.venv/lib/python3.6/site-packages/cornice/validators/_colander.py:110: DeprecationWarning: Setting schema to a class is deprecated. Set schema to an instance instead.
schema = _ensure_instantiated(schema)
```
</issue>
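The deprecation quoted above is triggered by handing cornice a schema class where an instance is expected. A minimal sketch of the instantiated-schema pattern; the service and schema names here are invented for illustration:

```python
import colander
from cornice import Service
from cornice.validators import colander_validator


class PingRequest(colander.MappingSchema):
    body = colander.SchemaNode(colander.Mapping(unknown='preserve'),
                               missing=colander.drop)


ping = Service(name='ping', path='/ping')


# Passing PingRequest() (an instance) rather than PingRequest (the class)
# avoids the colander DeprecationWarning shown in the issue.
@ping.post(schema=PingRequest(), validators=(colander_validator,))
def post_ping(request):
    return {'pong': True}
```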
<code>
[start of kinto/core/views/batch.py]
1 import logging
2
3 import colander
4 from cornice.validators import colander_validator
5 from pyramid import httpexceptions
6 from pyramid.security import NO_PERMISSION_REQUIRED
7
8 from kinto.core import errors
9 from kinto.core import Service
10 from kinto.core.errors import ErrorSchema
11 from kinto.core.utils import merge_dicts, build_request, build_response
12 from kinto.core.resource.viewset import CONTENT_TYPES
13
14
15 subrequest_logger = logging.getLogger('subrequest.summary')
16
17 valid_http_method = colander.OneOf(('GET', 'HEAD', 'DELETE', 'TRACE',
18 'POST', 'PUT', 'PATCH'))
19
20
21 def string_values(node, cstruct):
22 """Validate that a ``colander.Mapping`` only has strings in its values.
23
24 .. warning::
25
26 Should be associated to a ``colander.Mapping`` schema node.
27 """
28 are_strings = [isinstance(v, str) for v in cstruct.values()]
29 if not all(are_strings):
30 error_msg = '{} contains non string value'.format(cstruct)
31 raise colander.Invalid(node, error_msg)
32
33
34 class BatchRequestSchema(colander.MappingSchema):
35 method = colander.SchemaNode(colander.String(),
36 validator=valid_http_method,
37 missing=colander.drop)
38 path = colander.SchemaNode(colander.String(),
39 validator=colander.Regex('^/'))
40 headers = colander.SchemaNode(colander.Mapping(unknown='preserve'),
41 validator=string_values,
42 missing=colander.drop)
43 body = colander.SchemaNode(colander.Mapping(unknown='preserve'),
44 missing=colander.drop)
45
46 @staticmethod
47 def schema_type():
48 return colander.Mapping(unknown='raise')
49
50
51 class BatchPayloadSchema(colander.MappingSchema):
52 defaults = BatchRequestSchema(missing=colander.drop).clone()
53 requests = colander.SchemaNode(colander.Sequence(),
54 BatchRequestSchema())
55
56 @staticmethod
57 def schema_type():
58 return colander.Mapping(unknown='raise')
59
60 def __init__(self, *args, **kwargs):
61 super().__init__(*args, **kwargs)
62 # On defaults, path is not mandatory.
63 self.get('defaults').get('path').missing = colander.drop
64
65 def deserialize(self, cstruct=colander.null):
66 """Preprocess received data to carefully merge defaults.
67 """
68 if cstruct is not colander.null:
69 defaults = cstruct.get('defaults')
70 requests = cstruct.get('requests')
71 if isinstance(defaults, dict) and isinstance(requests, list):
72 for request in requests:
73 if isinstance(request, dict):
74 merge_dicts(request, defaults)
75 return super().deserialize(cstruct)
76
77
78 class BatchRequest(colander.MappingSchema):
79 body = BatchPayloadSchema()
80
81
82 class BatchResponseSchema(colander.MappingSchema):
83 status = colander.SchemaNode(colander.Integer())
84 path = colander.SchemaNode(colander.String())
85 headers = colander.SchemaNode(colander.Mapping(unknown='preserve'),
86 validator=string_values,
87 missing=colander.drop)
88 body = colander.SchemaNode(colander.Mapping(unknown='preserve'),
89 missing=colander.drop)
90
91
92 class BatchResponseBodySchema(colander.MappingSchema):
93 responses = colander.SequenceSchema(BatchResponseSchema(missing=colander.drop))
94
95
96 class BatchResponse(colander.MappingSchema):
97 body = BatchResponseBodySchema()
98
99
100 class ErrorResponseSchema(colander.MappingSchema):
101 body = ErrorSchema()
102
103
104 batch_responses = {
105 '200': BatchResponse(description='Return a list of operation responses.'),
106 '400': ErrorResponseSchema(description='The request was badly formatted.'),
107 'default': ErrorResponseSchema(description='an unknown error occurred.')
108 }
109
110 batch = Service(name='batch', path='/batch',
111 description='Batch operations')
112
113
114 @batch.post(schema=BatchRequest,
115 validators=(colander_validator,),
116 content_type=CONTENT_TYPES,
117 permission=NO_PERMISSION_REQUIRED,
118 tags=['Batch'], operation_id='batch',
119 response_schemas=batch_responses)
120 def post_batch(request):
121 requests = request.validated['body']['requests']
122
123 request.log_context(batch_size=len(requests))
124
125 limit = request.registry.settings['batch_max_requests']
126 if limit and len(requests) > int(limit):
127 error_msg = 'Number of requests is limited to {}'.format(limit)
128 request.errors.add('body', 'requests', error_msg)
129 return
130
131 if any([batch.path in req['path'] for req in requests]):
132 error_msg = 'Recursive call on {} endpoint is forbidden.'.format(batch.path)
133 request.errors.add('body', 'requests', error_msg)
134 return
135
136 responses = []
137
138 for subrequest_spec in requests:
139 subrequest = build_request(request, subrequest_spec)
140
141 log_context = {**request.log_context(),
142 'path': subrequest.path,
143 'method': subrequest.method}
144 try:
145 # Invoke subrequest without individual transaction.
146 resp, subrequest = request.follow_subrequest(subrequest,
147 use_tweens=False)
148 except httpexceptions.HTTPException as e:
149 # Since some request in the batch failed, we need to stop the parent request
150 # through Pyramid's transaction manager. 5XX errors are already caught by
151 # pyramid_tm's commit_veto
152 # https://github.com/Kinto/kinto/issues/624
153 if e.status_code == 409:
154 request.tm.abort()
155
156 if e.content_type == 'application/json':
157 resp = e
158 else:
159 # JSONify raw Pyramid errors.
160 resp = errors.http_error(e)
161
162 subrequest_logger.info('subrequest.summary', extra=log_context)
163
164 dict_resp = build_response(resp, subrequest)
165 responses.append(dict_resp)
166
167 return {
168 'responses': responses
169 }
170
[end of kinto/core/views/batch.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kinto/core/views/batch.py b/kinto/core/views/batch.py
--- a/kinto/core/views/batch.py
+++ b/kinto/core/views/batch.py
@@ -111,7 +111,7 @@
description='Batch operations')
[email protected](schema=BatchRequest,
[email protected](schema=BatchRequest(),
validators=(colander_validator,),
content_type=CONTENT_TYPES,
permission=NO_PERMISSION_REQUIRED,
| {"golden_diff": "diff --git a/kinto/core/views/batch.py b/kinto/core/views/batch.py\n--- a/kinto/core/views/batch.py\n+++ b/kinto/core/views/batch.py\n@@ -111,7 +111,7 @@\n description='Batch operations')\n \n \[email protected](schema=BatchRequest,\[email protected](schema=BatchRequest(),\n validators=(colander_validator,),\n content_type=CONTENT_TYPES,\n permission=NO_PERMISSION_REQUIRED,\n", "issue": "Remove colander deprecations\n```\r\n/home/mathieu/Code/Mozilla/kinto/.venv/lib/python3.6/site-packages/cornice/validators/_colander.py:110: DeprecationWarning: Setting schema to a class is deprecated. Set schema to an instance instead.\r\n schema = _ensure_instantiated(schema)\r\n```\n", "before_files": [{"content": "import logging\n\nimport colander\nfrom cornice.validators import colander_validator\nfrom pyramid import httpexceptions\nfrom pyramid.security import NO_PERMISSION_REQUIRED\n\nfrom kinto.core import errors\nfrom kinto.core import Service\nfrom kinto.core.errors import ErrorSchema\nfrom kinto.core.utils import merge_dicts, build_request, build_response\nfrom kinto.core.resource.viewset import CONTENT_TYPES\n\n\nsubrequest_logger = logging.getLogger('subrequest.summary')\n\nvalid_http_method = colander.OneOf(('GET', 'HEAD', 'DELETE', 'TRACE',\n 'POST', 'PUT', 'PATCH'))\n\n\ndef string_values(node, cstruct):\n \"\"\"Validate that a ``colander.Mapping`` only has strings in its values.\n\n .. warning::\n\n Should be associated to a ``colander.Mapping`` schema node.\n \"\"\"\n are_strings = [isinstance(v, str) for v in cstruct.values()]\n if not all(are_strings):\n error_msg = '{} contains non string value'.format(cstruct)\n raise colander.Invalid(node, error_msg)\n\n\nclass BatchRequestSchema(colander.MappingSchema):\n method = colander.SchemaNode(colander.String(),\n validator=valid_http_method,\n missing=colander.drop)\n path = colander.SchemaNode(colander.String(),\n validator=colander.Regex('^/'))\n headers = colander.SchemaNode(colander.Mapping(unknown='preserve'),\n validator=string_values,\n missing=colander.drop)\n body = colander.SchemaNode(colander.Mapping(unknown='preserve'),\n missing=colander.drop)\n\n @staticmethod\n def schema_type():\n return colander.Mapping(unknown='raise')\n\n\nclass BatchPayloadSchema(colander.MappingSchema):\n defaults = BatchRequestSchema(missing=colander.drop).clone()\n requests = colander.SchemaNode(colander.Sequence(),\n BatchRequestSchema())\n\n @staticmethod\n def schema_type():\n return colander.Mapping(unknown='raise')\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # On defaults, path is not mandatory.\n self.get('defaults').get('path').missing = colander.drop\n\n def deserialize(self, cstruct=colander.null):\n \"\"\"Preprocess received data to carefully merge defaults.\n \"\"\"\n if cstruct is not colander.null:\n defaults = cstruct.get('defaults')\n requests = cstruct.get('requests')\n if isinstance(defaults, dict) and isinstance(requests, list):\n for request in requests:\n if isinstance(request, dict):\n merge_dicts(request, defaults)\n return super().deserialize(cstruct)\n\n\nclass BatchRequest(colander.MappingSchema):\n body = BatchPayloadSchema()\n\n\nclass BatchResponseSchema(colander.MappingSchema):\n status = colander.SchemaNode(colander.Integer())\n path = colander.SchemaNode(colander.String())\n headers = colander.SchemaNode(colander.Mapping(unknown='preserve'),\n validator=string_values,\n missing=colander.drop)\n body = colander.SchemaNode(colander.Mapping(unknown='preserve'),\n 
missing=colander.drop)\n\n\nclass BatchResponseBodySchema(colander.MappingSchema):\n responses = colander.SequenceSchema(BatchResponseSchema(missing=colander.drop))\n\n\nclass BatchResponse(colander.MappingSchema):\n body = BatchResponseBodySchema()\n\n\nclass ErrorResponseSchema(colander.MappingSchema):\n body = ErrorSchema()\n\n\nbatch_responses = {\n '200': BatchResponse(description='Return a list of operation responses.'),\n '400': ErrorResponseSchema(description='The request was badly formatted.'),\n 'default': ErrorResponseSchema(description='an unknown error occurred.')\n}\n\nbatch = Service(name='batch', path='/batch',\n description='Batch operations')\n\n\[email protected](schema=BatchRequest,\n validators=(colander_validator,),\n content_type=CONTENT_TYPES,\n permission=NO_PERMISSION_REQUIRED,\n tags=['Batch'], operation_id='batch',\n response_schemas=batch_responses)\ndef post_batch(request):\n requests = request.validated['body']['requests']\n\n request.log_context(batch_size=len(requests))\n\n limit = request.registry.settings['batch_max_requests']\n if limit and len(requests) > int(limit):\n error_msg = 'Number of requests is limited to {}'.format(limit)\n request.errors.add('body', 'requests', error_msg)\n return\n\n if any([batch.path in req['path'] for req in requests]):\n error_msg = 'Recursive call on {} endpoint is forbidden.'.format(batch.path)\n request.errors.add('body', 'requests', error_msg)\n return\n\n responses = []\n\n for subrequest_spec in requests:\n subrequest = build_request(request, subrequest_spec)\n\n log_context = {**request.log_context(),\n 'path': subrequest.path,\n 'method': subrequest.method}\n try:\n # Invoke subrequest without individual transaction.\n resp, subrequest = request.follow_subrequest(subrequest,\n use_tweens=False)\n except httpexceptions.HTTPException as e:\n # Since some request in the batch failed, we need to stop the parent request\n # through Pyramid's transaction manager. 5XX errors are already caught by\n # pyramid_tm's commit_veto\n # https://github.com/Kinto/kinto/issues/624\n if e.status_code == 409:\n request.tm.abort()\n\n if e.content_type == 'application/json':\n resp = e\n else:\n # JSONify raw Pyramid errors.\n resp = errors.http_error(e)\n\n subrequest_logger.info('subrequest.summary', extra=log_context)\n\n dict_resp = build_response(resp, subrequest)\n responses.append(dict_resp)\n\n return {\n 'responses': responses\n }\n", "path": "kinto/core/views/batch.py"}]} | 2,239 | 100 |
gh_patches_debug_85 | rasdani/github-patches | git_diff | pwndbg__pwndbg-616 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
bp breaks on pie binaries before run
<!--
Before reporting a new issue, make sure that we do not have any duplicates already open.
If there is one it might be good to take part in the discussion there.
Please make sure you have checked that the issue persists on LATEST pwndbg version.
Below is a template for BUG REPORTS.
Don't include it if this is a FEATURE REQUEST.
-->
### Description
In the windbg commands, when setting a breakpoint using `bp`, the address is converted to an int
https://github.com/pwndbg/pwndbg/blob/ca17c6dbb0d1bc40ef060331aa48dad0675c5df9/pwndbg/commands/windbg.py#L297
using `pwndbg.inthook.xint`. This verifies that the argument is actually an address, but in the case of PIE binaries the address may only become valid after the binary is run, and the argument is then cast to `uint32_t`.
### Steps to reproduce
```sh
[/tmp] tail hello.c
#include <stdio.h>
int main(int argc, char **argv) {
puts("Hello World");
return 0;
}
[/tmp] make hello 17:41:43
cc hello.c -o hello
[/tmp] gdb -q hello 17:41:47
pwndbg: loaded 177 commands. Type pwndbg [filter] for a list.
pwndbg: created $rebase, $ida gdb functions (can be used with print/break)
Reading symbols from hello...(no debugging symbols found)...done.
pwndbg> bp 0x00005555555546b0
Breakpoint 1 at 0x555546b0
pwndbg> bl
Num Type Disp Enb Address What
1 breakpoint keep y 0x00000000555546b0
pwndbg> r
Starting program: /tmp/hello
Warning:
Cannot insert breakpoint 1.
Cannot access memory at address 0x555546b0
```
### My setup
```sh
[pwndbg] git --no-pager log -1 --stat 18:07:21 ☁ dev ☀
commit ca17c6dbb0d1bc40ef060331aa48dad0675c5df9
Author: Alisson Bezerra <[email protected]>
Date: Tue Apr 9 05:54:00 2019 -0300
Add xuntil command (#604)
pwndbg/commands/peda.py | 23 +++++++++++++++++++++++
1 file changed, 23 insertions(+)
[pwndbg] lsb_release -a 18:08:01 ☁ dev ☀
No LSB modules are available.
Distributor ID: Debian
Description: Debian GNU/Linux 9.8 (stretch)
Release: 9.8
Codename: stretch
[pwndbg] gdb -q 18:10:56 ☁ dev ☀
pwndbg: loaded 178 commands. Type pwndbg [filter] for a list.
pwndbg: created $rebase, $ida gdb functions (can be used with print/break)
pwndbg> show version
GNU gdb (Debian 7.12-6) 7.12.0.20161007-git
Copyright (C) 2016 Free Software Foundation, Inc.
License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>
This is free software: you are free to change and redistribute it.
There is NO WARRANTY, to the extent permitted by law. Type "show copying"
and "show warranty" for details.
This GDB was configured as "x86_64-linux-gnu".
Type "show configuration" for configuration details.
For bug reporting instructions, please see:
<http://www.gnu.org/software/gdb/bugs/>.
Find the GDB manual and other documentation resources online at:
<http://www.gnu.org/software/gdb/documentation/>.
For help, type "help".
Type "apropos word" to search for commands related to "word".
pwndbg> py import sys; print(sys.version)
3.5.3 (default, Sep 27 2018, 17:25:39)
[GCC 6.3.0 20170516]
pwndbg>
```
<!--
Show us your gdb/python/pwndbg/OS/IDA Pro version (depending on your case).
NOTE: We are currently supporting only Ubuntu installations.
It is known that pwndbg is not fully working e.g. on Arch Linux (the heap stuff is not working there).
If you would like to change this situation - help us improving pwndbg and supporting other distros!
This can be displayed in pwndbg through `version` command.
If it is somehow unavailable, use:
* `show version` - for gdb
* `py import sys; print(sys.version)` - for python
* pwndbg version/git commit id
-->
</issue>
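As a quick sanity check on the description, the breakpoint address reported in the reproduction is exactly the requested address truncated to 32 bits, which matches the `uint32_t` cast mentioned above:

```python
>>> hex(0x00005555555546b0 & 0xffffffff)
'0x555546b0'
```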
<code>
[start of pwndbg/typeinfo.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 """
4 Common types, and routines for manually loading types from file
5 via GCC.
6 """
7 from __future__ import absolute_import
8 from __future__ import division
9 from __future__ import print_function
10 from __future__ import unicode_literals
11
12 import glob
13 import os
14 import subprocess
15 import sys
16 import tempfile
17
18 import gdb
19
20 import pwndbg.events
21 import pwndbg.gcc
22 import pwndbg.memoize
23
24 module = sys.modules[__name__]
25
26
27 def is_pointer(value):
28 type = value
29
30 if isinstance(value, gdb.Value):
31 type = value.type
32
33 type = type.strip_typedefs()
34 return type.code == gdb.TYPE_CODE_PTR
35
36
37 def lookup_types(*types):
38 for type_str in types:
39 try:
40 return gdb.lookup_type(type_str)
41 except Exception as e:
42 exc = e
43 raise exc
44
45
46 @pwndbg.events.start
47 @pwndbg.events.stop
48 def update():
49
50 module.char = gdb.lookup_type('char')
51 module.ulong = lookup_types('unsigned long', 'uint', 'u32')
52 module.long = lookup_types('long', 'int', 'i32')
53 module.uchar = lookup_types('unsigned char', 'ubyte', 'u8')
54 module.ushort = lookup_types('unsigned short', 'ushort', 'u16')
55 module.uint = lookup_types('unsigned int', 'uint', 'u32')
56 module.void = lookup_types('void', '()')
57 module.uint8 = module.uchar
58 module.uint16 = module.ushort
59 module.uint32 = module.uint
60 module.uint64 = lookup_types('unsigned long long', 'ulong', 'u64')
61
62 module.int8 = lookup_types('char', 'i8')
63 module.int16 = lookup_types('short', 'i16')
64 module.int32 = lookup_types('int', 'i32')
65 module.int64 = lookup_types('long long', 'long', 'i64')
66
67 module.ssize_t = module.long
68 module.size_t = module.ulong
69
70 module.pvoid = void.pointer()
71 module.ppvoid = pvoid.pointer()
72 module.pchar = char.pointer()
73
74 module.ptrsize = pvoid.sizeof
75
76 if pvoid.sizeof == 4: module.ptrdiff = uint32
77 if pvoid.sizeof == 8: module.ptrdiff = uint64
78
79 module.null = gdb.Value(0).cast(void)
80
81 # Call it once so we load all of the types
82 update()
83
84 tempdir = tempfile.gettempdir() + '/pwndbg'
85 if not os.path.exists(tempdir):
86 os.mkdir(tempdir)
87
88 # Trial and error until things work
89 blacklist = ['regexp.h', 'xf86drm.h', 'libxl_json.h', 'xf86drmMode.h',
90 'caca0.h', 'xenguest.h', '_libxl_types_json.h', 'term_entry.h', 'slcurses.h',
91 'pcreposix.h', 'sudo_plugin.h', 'tic.h', 'sys/elf.h', 'sys/vm86.h',
92 'xenctrlosdep.h', 'xenctrl.h', 'cursesf.h', 'cursesm.h', 'gdbm.h', 'dbm.h',
93 'gcrypt-module.h', 'term.h', 'gmpxx.h', 'pcap/namedb.h', 'pcap-namedb.h',
94 'evr.h', 'mpc.h', 'fdt.h', 'mpfr.h', 'evrpc.h', 'png.h', 'zlib.h', 'pngconf.h',
95 'libelfsh.h', 'libmjollnir.h', 'hwloc.h', 'ares.h', 'revm.h', 'ares_rules.h',
96 'libunwind-ptrace.h', 'libui.h', 'librevm-color.h', 'libedfmt.h','revm-objects.h',
97 'libetrace.h', 'revm-io.h','libasm-mips.h','libstderesi.h','libasm.h','libaspect.h',
98 'libunwind.h','libmjollnir-objects.h','libunwind-coredump.h','libunwind-dynamic.h']
99
100 def load(name):
101 """Load symbol by name from headers in standard system include directory"""
102 try:
103 return gdb.lookup_type(name)
104 except gdb.error:
105 pass
106
107 # s, _ = gdb.lookup_symbol(name)
108
109 # Try to find an architecture-specific include path
110 arch = pwndbg.arch.current.split(':')[0]
111
112 include_dir = glob.glob('/usr/%s*/include' % arch)
113
114 if include_dir:
115 include_dir = include_dir[0]
116 else:
117 include_dir = '/usr/include'
118
119 source = '#include <fstream>\n'
120
121 for subdir in ['', 'sys', 'netinet']:
122 dirname = os.path.join(include_dir, subdir)
123 for path in glob.glob(os.path.join(dirname, '*.h')):
124 if any(b in path for b in blacklist):
125 continue
126 print(path)
127 source += '#include "%s"\n' % path
128
129
130 source += '''
131 {name} foo;
132 '''.format(**locals())
133
134 filename = '%s/%s_%s.cc' % (tempdir, arch, '-'.join(name.split()))
135
136 with open(filename, 'w+') as f:
137 f.write(source)
138 f.flush()
139 os.fsync(f.fileno())
140
141 compile(filename)
142
143 return gdb.lookup_type(name)
144
145 def compile(filename=None, address=0):
146 """Compile and extract symbols from specified file"""
147 if filename is None:
148 print("Specify a filename to compile.")
149 return
150
151 objectname = os.path.splitext(filename)[0] + ".o"
152
153 if not os.path.exists(objectname):
154 gcc = pwndbg.gcc.which()
155 gcc += ['-w', '-c', '-g', filename, '-o', objectname]
156 try:
157 subprocess.check_output(gcc)
158 except subprocess.CalledProcessError as e:
159 return
160
161 add_symbol_file(objectname, address)
162
163 def add_symbol_file(filename=None, address=0):
164 """Read additional symbol table information from the object file filename"""
165 if filename is None:
166 print("Specify a symbol file to add.")
167 return
168
169 with pwndbg.events.Pause():
170 gdb.execute('add-symbol-file %s %s' % (filename, address), from_tty=False, to_string=True)
171
172 def read_gdbvalue(type_name, addr):
173 """ Read the memory contents at addr and interpret them as a GDB value with the given type """
174 gdb_type = pwndbg.typeinfo.load(type_name)
175 return gdb.Value(addr).cast(gdb_type.pointer()).dereference()
176
[end of pwndbg/typeinfo.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pwndbg/typeinfo.py b/pwndbg/typeinfo.py
--- a/pwndbg/typeinfo.py
+++ b/pwndbg/typeinfo.py
@@ -43,6 +43,7 @@
raise exc
[email protected]_objfile
@pwndbg.events.start
@pwndbg.events.stop
def update():
| {"golden_diff": "diff --git a/pwndbg/typeinfo.py b/pwndbg/typeinfo.py\n--- a/pwndbg/typeinfo.py\n+++ b/pwndbg/typeinfo.py\n@@ -43,6 +43,7 @@\n raise exc\n \n \[email protected]_objfile\n @pwndbg.events.start\n @pwndbg.events.stop\n def update():\n", "issue": "bp breaks on pie binaries before run\n<!--\r\nBefore reporting a new issue, make sure that we do not have any duplicates already open.\r\nIf there is one it might be good to take part in the discussion there.\r\n\r\nPlease make sure you have checked that the issue persists on LATEST pwndbg version.\r\n\r\nBelow is a template for BUG REPORTS.\r\nDon't include it if this is a FEATURE REQUEST.\r\n-->\r\n\r\n\r\n### Description\r\nIn the windbg commands while setting a breakpoint using `bp` the address is converted to int\r\n\r\nhttps://github.com/pwndbg/pwndbg/blob/ca17c6dbb0d1bc40ef060331aa48dad0675c5df9/pwndbg/commands/windbg.py#L297\r\n\r\nusing `pwndbg.inthook.xint`. This verifies if the argument is actually an address but in case of pie binaries it could be possible that the address is loaded later and then the argument is casted to `uint32_t`\r\n### Steps to reproduce\r\n```sh\r\n[/tmp] tail hello.c \r\n#include <stdio.h>\r\n\r\nint main(int argc, char **argv) {\r\n puts(\"Hello World\");\r\n return 0;\r\n}\r\n[/tmp] make hello 17:41:43 \r\ncc hello.c -o hello\r\n[/tmp] gdb -q hello 17:41:47 \r\npwndbg: loaded 177 commands. Type pwndbg [filter] for a list.\r\npwndbg: created $rebase, $ida gdb functions (can be used with print/break)\r\nReading symbols from hello...(no debugging symbols found)...done.\r\npwndbg> bp 0x00005555555546b0\r\nBreakpoint 1 at 0x555546b0\r\npwndbg> bl\r\nNum Type Disp Enb Address What\r\n1 breakpoint keep y 0x00000000555546b0 \r\npwndbg> r\r\nStarting program: /tmp/hello \r\nWarning:\r\nCannot insert breakpoint 1.\r\nCannot access memory at address 0x555546b0\r\n\r\n```\r\n\r\n### My setup\r\n```sh\r\n[pwndbg] git --no-pager log -1 --stat 18:07:21 \u2601 dev \u2600\r\ncommit ca17c6dbb0d1bc40ef060331aa48dad0675c5df9\r\nAuthor: Alisson Bezerra <[email protected]>\r\nDate: Tue Apr 9 05:54:00 2019 -0300\r\n\r\n Add xuntil command (#604)\r\n\r\n pwndbg/commands/peda.py | 23 +++++++++++++++++++++++\r\n 1 file changed, 23 insertions(+)\r\n[pwndbg] lsb_release -a 18:08:01 \u2601 dev \u2600\r\nNo LSB modules are available.\r\nDistributor ID:\tDebian\r\nDescription:\tDebian GNU/Linux 9.8 (stretch)\r\nRelease:\t9.8\r\nCodename:\tstretch\r\n[pwndbg] gdb -q 18:10:56 \u2601 dev \u2600\r\npwndbg: loaded 178 commands. Type pwndbg [filter] for a list.\r\npwndbg: created $rebase, $ida gdb functions (can be used with print/break)\r\npwndbg> show version\r\nGNU gdb (Debian 7.12-6) 7.12.0.20161007-git\r\nCopyright (C) 2016 Free Software Foundation, Inc.\r\nLicense GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>\r\nThis is free software: you are free to change and redistribute it.\r\nThere is NO WARRANTY, to the extent permitted by law. 
Type \"show copying\"\r\nand \"show warranty\" for details.\r\nThis GDB was configured as \"x86_64-linux-gnu\".\r\nType \"show configuration\" for configuration details.\r\nFor bug reporting instructions, please see:\r\n<http://www.gnu.org/software/gdb/bugs/>.\r\nFind the GDB manual and other documentation resources online at:\r\n<http://www.gnu.org/software/gdb/documentation/>.\r\nFor help, type \"help\".\r\nType \"apropos word\" to search for commands related to \"word\".\r\npwndbg> py import sys; print(sys.version)\r\n3.5.3 (default, Sep 27 2018, 17:25:39) \r\n[GCC 6.3.0 20170516]\r\npwndbg> \r\n\r\n```\r\n<!--\r\nShow us your gdb/python/pwndbg/OS/IDA Pro version (depending on your case).\r\n\r\nNOTE: We are currently supporting only Ubuntu installations.\r\nIt is known that pwndbg is not fully working e.g. on Arch Linux (the heap stuff is not working there).\r\nIf you would like to change this situation - help us improving pwndbg and supporting other distros!\r\n\r\nThis can be displayed in pwndbg through `version` command.\r\n\r\nIf it is somehow unavailable, use:\r\n* `show version` - for gdb\r\n* `py import sys; print(sys.version)` - for python\r\n* pwndbg version/git commit id\r\n-->\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nCommon types, and routines for manually loading types from file\nvia GCC.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport glob\nimport os\nimport subprocess\nimport sys\nimport tempfile\n\nimport gdb\n\nimport pwndbg.events\nimport pwndbg.gcc\nimport pwndbg.memoize\n\nmodule = sys.modules[__name__]\n\n\ndef is_pointer(value):\n type = value\n\n if isinstance(value, gdb.Value):\n type = value.type\n\n type = type.strip_typedefs()\n return type.code == gdb.TYPE_CODE_PTR\n\n\ndef lookup_types(*types):\n for type_str in types:\n try:\n return gdb.lookup_type(type_str)\n except Exception as e:\n exc = e\n raise exc\n\n\[email protected]\[email protected]\ndef update():\n\n module.char = gdb.lookup_type('char')\n module.ulong = lookup_types('unsigned long', 'uint', 'u32')\n module.long = lookup_types('long', 'int', 'i32')\n module.uchar = lookup_types('unsigned char', 'ubyte', 'u8')\n module.ushort = lookup_types('unsigned short', 'ushort', 'u16')\n module.uint = lookup_types('unsigned int', 'uint', 'u32')\n module.void = lookup_types('void', '()')\n module.uint8 = module.uchar\n module.uint16 = module.ushort\n module.uint32 = module.uint\n module.uint64 = lookup_types('unsigned long long', 'ulong', 'u64')\n\n module.int8 = lookup_types('char', 'i8')\n module.int16 = lookup_types('short', 'i16')\n module.int32 = lookup_types('int', 'i32')\n module.int64 = lookup_types('long long', 'long', 'i64')\n\n module.ssize_t = module.long\n module.size_t = module.ulong\n\n module.pvoid = void.pointer()\n module.ppvoid = pvoid.pointer()\n module.pchar = char.pointer()\n\n module.ptrsize = pvoid.sizeof\n\n if pvoid.sizeof == 4: module.ptrdiff = uint32\n if pvoid.sizeof == 8: module.ptrdiff = uint64\n\n module.null = gdb.Value(0).cast(void)\n\n# Call it once so we load all of the types\nupdate()\n\ntempdir = tempfile.gettempdir() + '/pwndbg'\nif not os.path.exists(tempdir):\n os.mkdir(tempdir)\n\n# Trial and error until things work\nblacklist = ['regexp.h', 'xf86drm.h', 'libxl_json.h', 'xf86drmMode.h',\n'caca0.h', 'xenguest.h', '_libxl_types_json.h', 'term_entry.h', 'slcurses.h',\n'pcreposix.h', 'sudo_plugin.h', 
'tic.h', 'sys/elf.h', 'sys/vm86.h',\n'xenctrlosdep.h', 'xenctrl.h', 'cursesf.h', 'cursesm.h', 'gdbm.h', 'dbm.h',\n'gcrypt-module.h', 'term.h', 'gmpxx.h', 'pcap/namedb.h', 'pcap-namedb.h',\n'evr.h', 'mpc.h', 'fdt.h', 'mpfr.h', 'evrpc.h', 'png.h', 'zlib.h', 'pngconf.h',\n'libelfsh.h', 'libmjollnir.h', 'hwloc.h', 'ares.h', 'revm.h', 'ares_rules.h',\n'libunwind-ptrace.h', 'libui.h', 'librevm-color.h', 'libedfmt.h','revm-objects.h',\n'libetrace.h', 'revm-io.h','libasm-mips.h','libstderesi.h','libasm.h','libaspect.h',\n'libunwind.h','libmjollnir-objects.h','libunwind-coredump.h','libunwind-dynamic.h']\n\ndef load(name):\n \"\"\"Load symbol by name from headers in standard system include directory\"\"\"\n try:\n return gdb.lookup_type(name)\n except gdb.error:\n pass\n\n # s, _ = gdb.lookup_symbol(name)\n\n # Try to find an architecture-specific include path\n arch = pwndbg.arch.current.split(':')[0]\n\n include_dir = glob.glob('/usr/%s*/include' % arch)\n\n if include_dir:\n include_dir = include_dir[0]\n else:\n include_dir = '/usr/include'\n\n source = '#include <fstream>\\n'\n\n for subdir in ['', 'sys', 'netinet']:\n dirname = os.path.join(include_dir, subdir)\n for path in glob.glob(os.path.join(dirname, '*.h')):\n if any(b in path for b in blacklist):\n continue\n print(path)\n source += '#include \"%s\"\\n' % path\n\n\n source += '''\n{name} foo;\n'''.format(**locals())\n\n filename = '%s/%s_%s.cc' % (tempdir, arch, '-'.join(name.split()))\n\n with open(filename, 'w+') as f:\n f.write(source)\n f.flush()\n os.fsync(f.fileno())\n\n compile(filename)\n\n return gdb.lookup_type(name)\n\ndef compile(filename=None, address=0):\n \"\"\"Compile and extract symbols from specified file\"\"\"\n if filename is None:\n print(\"Specify a filename to compile.\")\n return\n\n objectname = os.path.splitext(filename)[0] + \".o\"\n\n if not os.path.exists(objectname):\n gcc = pwndbg.gcc.which()\n gcc += ['-w', '-c', '-g', filename, '-o', objectname]\n try:\n subprocess.check_output(gcc)\n except subprocess.CalledProcessError as e:\n return\n\n add_symbol_file(objectname, address)\n\ndef add_symbol_file(filename=None, address=0):\n \"\"\"Read additional symbol table information from the object file filename\"\"\"\n if filename is None:\n print(\"Specify a symbol file to add.\")\n return\n\n with pwndbg.events.Pause():\n gdb.execute('add-symbol-file %s %s' % (filename, address), from_tty=False, to_string=True)\n\ndef read_gdbvalue(type_name, addr):\n \"\"\" Read the memory contents at addr and interpret them as a GDB value with the given type \"\"\"\n gdb_type = pwndbg.typeinfo.load(type_name)\n return gdb.Value(addr).cast(gdb_type.pointer()).dereference()\n", "path": "pwndbg/typeinfo.py"}]} | 3,642 | 80 |
gh_patches_debug_7338 | rasdani/github-patches | git_diff | mne-tools__mne-bids-1069 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`key_press_event` is deprecated and needs to be replaced in our code
see: https://github.com/mne-tools/mne-bids/actions/runs/3145484445/jobs/5112832235#step:14:1665
</issue>
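For reference, the usual replacement when test code calls the deprecated `fig.canvas.key_press_event(...)` helper is to build the event object and dispatch it through the callback registry. A sketch under the assumption of matplotlib >= 3.6; verify the exact constructor against the pinned version:

```python
import matplotlib.pyplot as plt
from matplotlib.backend_bases import KeyEvent

fig, ax = plt.subplots()

# Deprecated in matplotlib 3.6: fig.canvas.key_press_event("right")
# Assumed replacement: construct the KeyEvent and dispatch it explicitly.
event = KeyEvent("key_press_event", fig.canvas, "right")
fig.canvas.callbacks.process("key_press_event", event)
```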
<code>
[start of doc/conf.py]
1 """Configure details for documentation with sphinx."""
2 import os
3 import sys
4 from datetime import date
5
6 import sphinx_gallery # noqa: F401
7 from sphinx_gallery.sorting import ExampleTitleSortKey
8
9 import mne_bids
10
11
12 # If extensions (or modules to document with autodoc) are in another directory,
13 # add these directories to sys.path here. If the directory is relative to the
14 # documentation root, use os.path.abspath to make it absolute, like shown here.
15 curdir = os.path.dirname(__file__)
16 sys.path.append(os.path.abspath(os.path.join(curdir, '..', 'mne_bids')))
17 sys.path.append(os.path.abspath(os.path.join(curdir, 'sphinxext')))
18
19
20 # -- General configuration ------------------------------------------------
21
22 # If your documentation needs a minimal Sphinx version, state it here.
23 #
24 # needs_sphinx = '1.0'
25
26 # Add any Sphinx extension module names here, as strings. They can be
27 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
28 # ones.
29 extensions = [
30 'sphinx.ext.githubpages',
31 'sphinx.ext.autodoc',
32 'sphinx.ext.mathjax',
33 'sphinx.ext.viewcode',
34 'sphinx.ext.autosummary',
35 'sphinx.ext.doctest',
36 'sphinx.ext.intersphinx',
37 'sphinx_gallery.gen_gallery',
38 'numpydoc',
39 'sphinx_copybutton',
40 'gen_cli', # custom extension, see ./sphinxext/gen_cli.py
41 'gh_substitutions', # custom extension, see ./sphinxext/gh_substitutions.py
42 ]
43
44 # configure sphinx-copybutton
45 copybutton_prompt_text = r">>> |\.\.\. |\$ "
46 copybutton_prompt_is_regexp = True
47
48 # configure numpydoc
49 numpydoc_xref_param_type = True
50 numpydoc_class_members_toctree = False
51 numpydoc_attributes_as_param_list = True
52 numpydoc_xref_aliases = {
53 'BIDSPath': ':class:`BIDSPath <mne_bids.BIDSPath>`',
54 'path-like': ':term:`path-like <mne:path-like>`',
55 'array-like': ':term:`array-like <mne:array-like>`',
56 'int': ':class:`int <python:int>`',
57 'bool': ':class:`bool <python:bool>`',
58 'float': ':class:`float <python:float>`',
59 'list': ':class:`list <python:list>`',
60 'tuple': ':class:`tuple <python:tuple>`',
61 'NibabelImageObject': 'nibabel.spatialimages.SpatialImage',
62 }
63 numpydoc_xref_ignore = {
64 # words
65 'instance', 'instances', 'of'
66 }
67
68
69 # generate autosummary even if no references
70 autosummary_generate = True
71 autodoc_default_options = {'inherited-members': None}
72 default_role = 'autolink' # XXX silently allows bad syntax, someone should fix
73
74 # configure linkcheck
75 # https://sphinx-doc.org/en/master/usage/configuration.html?#options-for-the-linkcheck-builder
76 linkcheck_retries = 2
77 linkcheck_rate_limit_timeout = 15.0
78 linkcheck_ignore = [
79 r'https://www.researchgate.net/profile/.*',
80 ]
81
82 # The suffix(es) of source filenames.
83 # You can specify multiple suffix as a list of string:
84 #
85 # source_suffix = ['.rst', '.md']
86 source_suffix = '.rst'
87
88 # The master toctree document.
89 master_doc = 'index'
90
91 # General information about the project.
92 project = u'MNE-BIDS'
93 td = date.today()
94 copyright = u'2017-%s, MNE Developers. Last updated on %s' % (td.year,
95 td.isoformat())
96
97 author = u'MNE Developers'
98
99 # The version info for the project you're documenting, acts as replacement for
100 # |version| and |release|, also used in various other places throughout the
101 # built documents.
102 #
103 # The short X.Y version.
104 version = mne_bids.__version__
105 # The full version, including alpha/beta/rc tags.
106 release = version
107
108 # List of patterns, relative to source directory, that match files and
109 # directories to ignore when looking for source files.
110 # This patterns also effect to html_static_path and html_extra_path
111 exclude_patterns = ['auto_examples/index.rst', '_build', 'Thumbs.db',
112 '.DS_Store']
113
114 # HTML options (e.g., theme)
115 html_show_sourcelink = False
116 html_copy_source = False
117
118 html_theme = 'pydata_sphinx_theme'
119
120 # Add any paths that contain templates here, relative to this directory.
121 templates_path = ['_templates']
122 html_static_path = ['_static']
123 html_css_files = ['style.css']
124
125 # Theme options are theme-specific and customize the look and feel of a theme
126 # further. For a list of options available for each theme, see the
127 # documentation.
128 html_theme_options = {
129 'icon_links': [
130 dict(name='GitHub',
131 url='https://github.com/mne-tools/mne-bids',
132 icon='fab fa-github-square'),
133 dict(name='Discourse',
134 url='https://mne.discourse.group/tags/mne-bids',
135 icon='fab fa-discourse'),
136 ],
137 'icon_links_label': 'Quick Links', # for screen reader
138 'use_edit_page_button': False,
139 'navigation_with_keys': False,
140 'show_toc_level': 1,
141 'navbar_end': ['version-switcher', 'navbar-icon-links'],
142 }
143
144 html_context = {
145 'versions_dropdown': {
146 'dev': 'v0.11 (devel)',
147 'stable': 'v0.10 (stable)',
148 'v0.9': 'v0.9',
149 'v0.8': 'v0.8',
150 'v0.7': 'v0.7',
151 'v0.6': 'v0.6',
152 'v0.5': 'v0.5',
153 'v0.4': 'v0.4',
154 'v0.3': 'v0.3',
155 'v0.2': 'v0.2',
156 'v0.1': 'v0.1',
157 },
158 }
159
160 html_sidebars = {}
161
162 # Example configuration for intersphinx: refer to the Python standard library.
163 intersphinx_mapping = {
164 'python': ('https://docs.python.org/3', None),
165 'mne': ('https://mne.tools/dev', None),
166 'numpy': ('https://numpy.org/devdocs', None),
167 'scipy': ('https://scipy.github.io/devdocs', None),
168 'matplotlib': ('https://matplotlib.org', None),
169 'nilearn': ('http://nilearn.github.io/stable', None),
170 'pandas': ('https://pandas.pydata.org/pandas-docs/dev', None),
171 'nibabel': ('https://nipy.org/nibabel', None),
172 }
173 intersphinx_timeout = 5
174
175 # Resolve binder filepath_prefix. From the docs:
176 # "A prefix to append to the filepath in the Binder links. You should use this
177 # if you will store your built documentation in a sub-folder of a repository,
178 # instead of in the root."
179 # we will store dev docs in a `dev` subdirectory and all other docs in a
180 # directory "v" + version_str. E.g., "v0.3"
181 if 'dev' in version:
182 filepath_prefix = 'dev'
183 else:
184 filepath_prefix = 'v{}'.format(version)
185
186 sphinx_gallery_conf = {
187 'doc_module': 'mne_bids',
188 'reference_url': {
189 'mne_bids': None,
190 },
191 'backreferences_dir': 'generated',
192 'examples_dirs': '../examples',
193 'within_subsection_order': ExampleTitleSortKey,
194 'gallery_dirs': 'auto_examples',
195 'filename_pattern': '^((?!sgskip).)*$',
196 'binder': {
197 # Required keys
198 'org': 'mne-tools',
199 'repo': 'mne-bids',
200 'branch': 'gh-pages', # noqa: E501 Can be any branch, tag, or commit hash. Use a branch that hosts your docs.
201 'binderhub_url': 'https://mybinder.org', # noqa: E501 Any URL of a binderhub deployment. Must be full URL (e.g. https://mybinder.org).
202 'filepath_prefix': filepath_prefix, # noqa: E501 A prefix to prepend to any filepaths in Binder links.
203 'dependencies': [
204 '../test_requirements.txt',
205 './requirements.txt',
206 ],
207 }
208 }
209
[end of doc/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/doc/conf.py b/doc/conf.py
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -52,7 +52,7 @@
numpydoc_xref_aliases = {
'BIDSPath': ':class:`BIDSPath <mne_bids.BIDSPath>`',
'path-like': ':term:`path-like <mne:path-like>`',
- 'array-like': ':term:`array-like <mne:array-like>`',
+ 'array-like': ':term:`array_like <numpy:array_like>`',
'int': ':class:`int <python:int>`',
'bool': ':class:`bool <python:bool>`',
'float': ':class:`float <python:float>`',
| {"golden_diff": "diff --git a/doc/conf.py b/doc/conf.py\n--- a/doc/conf.py\n+++ b/doc/conf.py\n@@ -52,7 +52,7 @@\n numpydoc_xref_aliases = {\n 'BIDSPath': ':class:`BIDSPath <mne_bids.BIDSPath>`',\n 'path-like': ':term:`path-like <mne:path-like>`',\n- 'array-like': ':term:`array-like <mne:array-like>`',\n+ 'array-like': ':term:`array_like <numpy:array_like>`',\n 'int': ':class:`int <python:int>`',\n 'bool': ':class:`bool <python:bool>`',\n 'float': ':class:`float <python:float>`',\n", "issue": "`key_press_event` is deprecated, needs to be replaced in our code\nsee: https://github.com/mne-tools/mne-bids/actions/runs/3145484445/jobs/5112832235#step:14:1665\r\n\r\n\n", "before_files": [{"content": "\"\"\"Configure details for documentation with sphinx.\"\"\"\nimport os\nimport sys\nfrom datetime import date\n\nimport sphinx_gallery # noqa: F401\nfrom sphinx_gallery.sorting import ExampleTitleSortKey\n\nimport mne_bids\n\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\ncurdir = os.path.dirname(__file__)\nsys.path.append(os.path.abspath(os.path.join(curdir, '..', 'mne_bids')))\nsys.path.append(os.path.abspath(os.path.join(curdir, 'sphinxext')))\n\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.githubpages',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.doctest',\n 'sphinx.ext.intersphinx',\n 'sphinx_gallery.gen_gallery',\n 'numpydoc',\n 'sphinx_copybutton',\n 'gen_cli', # custom extension, see ./sphinxext/gen_cli.py\n 'gh_substitutions', # custom extension, see ./sphinxext/gh_substitutions.py\n]\n\n# configure sphinx-copybutton\ncopybutton_prompt_text = r\">>> |\\.\\.\\. 
|\\$ \"\ncopybutton_prompt_is_regexp = True\n\n# configure numpydoc\nnumpydoc_xref_param_type = True\nnumpydoc_class_members_toctree = False\nnumpydoc_attributes_as_param_list = True\nnumpydoc_xref_aliases = {\n 'BIDSPath': ':class:`BIDSPath <mne_bids.BIDSPath>`',\n 'path-like': ':term:`path-like <mne:path-like>`',\n 'array-like': ':term:`array-like <mne:array-like>`',\n 'int': ':class:`int <python:int>`',\n 'bool': ':class:`bool <python:bool>`',\n 'float': ':class:`float <python:float>`',\n 'list': ':class:`list <python:list>`',\n 'tuple': ':class:`tuple <python:tuple>`',\n 'NibabelImageObject': 'nibabel.spatialimages.SpatialImage',\n}\nnumpydoc_xref_ignore = {\n # words\n 'instance', 'instances', 'of'\n}\n\n\n# generate autosummary even if no references\nautosummary_generate = True\nautodoc_default_options = {'inherited-members': None}\ndefault_role = 'autolink' # XXX silently allows bad syntax, someone should fix\n\n# configure linkcheck\n# https://sphinx-doc.org/en/master/usage/configuration.html?#options-for-the-linkcheck-builder\nlinkcheck_retries = 2\nlinkcheck_rate_limit_timeout = 15.0\nlinkcheck_ignore = [\n r'https://www.researchgate.net/profile/.*',\n]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'MNE-BIDS'\ntd = date.today()\ncopyright = u'2017-%s, MNE Developers. Last updated on %s' % (td.year,\n td.isoformat())\n\nauthor = u'MNE Developers'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = mne_bids.__version__\n# The full version, including alpha/beta/rc tags.\nrelease = version\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = ['auto_examples/index.rst', '_build', 'Thumbs.db',\n '.DS_Store']\n\n# HTML options (e.g., theme)\nhtml_show_sourcelink = False\nhtml_copy_source = False\n\nhtml_theme = 'pydata_sphinx_theme'\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\nhtml_static_path = ['_static']\nhtml_css_files = ['style.css']\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. 
For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = {\n 'icon_links': [\n dict(name='GitHub',\n url='https://github.com/mne-tools/mne-bids',\n icon='fab fa-github-square'),\n dict(name='Discourse',\n url='https://mne.discourse.group/tags/mne-bids',\n icon='fab fa-discourse'),\n ],\n 'icon_links_label': 'Quick Links', # for screen reader\n 'use_edit_page_button': False,\n 'navigation_with_keys': False,\n 'show_toc_level': 1,\n 'navbar_end': ['version-switcher', 'navbar-icon-links'],\n}\n\nhtml_context = {\n 'versions_dropdown': {\n 'dev': 'v0.11 (devel)',\n 'stable': 'v0.10 (stable)',\n 'v0.9': 'v0.9',\n 'v0.8': 'v0.8',\n 'v0.7': 'v0.7',\n 'v0.6': 'v0.6',\n 'v0.5': 'v0.5',\n 'v0.4': 'v0.4',\n 'v0.3': 'v0.3',\n 'v0.2': 'v0.2',\n 'v0.1': 'v0.1',\n },\n}\n\nhtml_sidebars = {}\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3', None),\n 'mne': ('https://mne.tools/dev', None),\n 'numpy': ('https://numpy.org/devdocs', None),\n 'scipy': ('https://scipy.github.io/devdocs', None),\n 'matplotlib': ('https://matplotlib.org', None),\n 'nilearn': ('http://nilearn.github.io/stable', None),\n 'pandas': ('https://pandas.pydata.org/pandas-docs/dev', None),\n 'nibabel': ('https://nipy.org/nibabel', None),\n}\nintersphinx_timeout = 5\n\n# Resolve binder filepath_prefix. From the docs:\n# \"A prefix to append to the filepath in the Binder links. You should use this\n# if you will store your built documentation in a sub-folder of a repository,\n# instead of in the root.\"\n# we will store dev docs in a `dev` subdirectory and all other docs in a\n# directory \"v\" + version_str. E.g., \"v0.3\"\nif 'dev' in version:\n filepath_prefix = 'dev'\nelse:\n filepath_prefix = 'v{}'.format(version)\n\nsphinx_gallery_conf = {\n 'doc_module': 'mne_bids',\n 'reference_url': {\n 'mne_bids': None,\n },\n 'backreferences_dir': 'generated',\n 'examples_dirs': '../examples',\n 'within_subsection_order': ExampleTitleSortKey,\n 'gallery_dirs': 'auto_examples',\n 'filename_pattern': '^((?!sgskip).)*$',\n 'binder': {\n # Required keys\n 'org': 'mne-tools',\n 'repo': 'mne-bids',\n 'branch': 'gh-pages', # noqa: E501 Can be any branch, tag, or commit hash. Use a branch that hosts your docs.\n 'binderhub_url': 'https://mybinder.org', # noqa: E501 Any URL of a binderhub deployment. Must be full URL (e.g. https://mybinder.org).\n 'filepath_prefix': filepath_prefix, # noqa: E501 A prefix to prepend to any filepaths in Binder links.\n 'dependencies': [\n '../test_requirements.txt',\n './requirements.txt',\n ],\n }\n}\n", "path": "doc/conf.py"}]} | 3,004 | 160 |
gh_patches_debug_22523 | rasdani/github-patches | git_diff | borgbackup__borg-1207 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
TypeError: acl_access value must be surrogate-escaped str or None
`acl_get` in `platform/linux.pyx` returns `bytes` values for `acl_access`, `acl_default`, `acl_extended`, and `acl_nfs4` on Linux x86_64. Those `bytes` are not converted to `str` afterwards.
Relevant stacktrace:
```
File "/usr/lib/python3.5/site-packages/borg/archiver.py", line 332, in _process
status = archive.process_file(path, st, cache, self.ignore_inode)
File "/usr/lib/python3.5/site-packages/borg/archive.py", line 669, in process_file
item.update(self.stat_attrs(st, path))
File "/usr/lib/python3.5/site-packages/borg/item.py", line 40, in update
setattr(self, self._check_key(k), v)
File "/usr/lib/python3.5/site-packages/borg/item.py", line 95, in _set
raise TypeError(type_error_msg)
TypeError: acl_access value must be surrogate-escaped str or None
```
</issue>
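The failure is easy to reproduce against the `Item` class shown in the listing below, since `acl_access` is declared with `str` as its value type while `acl_get` hands back raw `bytes`. A small sketch with made-up values:

```python
from borg.item import Item

item = Item(path='some/file')

item.acl_access = "user::rw-"       # str passes the property's type check

try:
    item.acl_access = b"user::rw-"  # bytes, as returned by acl_get on Linux
except TypeError as exc:
    print(exc)                      # "acl_access value must be surrogate-escaped str ..."
```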
<code>
[start of src/borg/item.py]
1 from .constants import ITEM_KEYS
2 from .helpers import safe_encode, safe_decode
3 from .helpers import bigint_to_int, int_to_bigint
4 from .helpers import StableDict
5
6
7 class PropDict:
8 """
9 Manage a dictionary via properties.
10
11 - initialization by giving a dict or kw args
12 - on initialization, normalize dict keys to be str type
13 - access dict via properties, like: x.key_name
14 - membership check via: 'key_name' in x
15 - optionally, encode when setting a value
16 - optionally, decode when getting a value
17 - be safe against typos in key names: check against VALID_KEYS
18 - when setting a value: check type of value
19 """
20 VALID_KEYS = None # override with <set of str> in child class
21
22 __slots__ = ("_dict", ) # avoid setting attributes not supported by properties
23
24 def __init__(self, data_dict=None, internal_dict=None, **kw):
25 if data_dict is None:
26 data = kw
27 elif not isinstance(data_dict, dict):
28 raise TypeError("data_dict must be dict")
29 else:
30 data = data_dict
31 self._dict = {}
32 self.update_internal(internal_dict or {})
33 self.update(data)
34
35 def update(self, d):
36 for k, v in d.items():
37 if isinstance(k, bytes):
38 k = k.decode()
39 setattr(self, self._check_key(k), v)
40
41 def update_internal(self, d):
42 for k, v in d.items():
43 if isinstance(k, bytes):
44 k = k.decode()
45 self._dict[k] = v
46
47 def __eq__(self, other):
48 return self.as_dict() == other.as_dict()
49
50 def __repr__(self):
51 return '%s(internal_dict=%r)' % (self.__class__.__name__, self._dict)
52
53 def as_dict(self):
54 """return the internal dictionary"""
55 return StableDict(self._dict)
56
57 def _check_key(self, key):
58 """make sure key is of type str and known"""
59 if not isinstance(key, str):
60 raise TypeError("key must be str")
61 if key not in self.VALID_KEYS:
62 raise ValueError("key '%s' is not a valid key" % key)
63 return key
64
65 def __contains__(self, key):
66 """do we have this key?"""
67 return self._check_key(key) in self._dict
68
69 def get(self, key, default=None):
70 """get value for key, return default if key does not exist"""
71 return getattr(self, self._check_key(key), default)
72
73 @staticmethod
74 def _make_property(key, value_type, value_type_name=None, encode=None, decode=None):
75 """return a property that deals with self._dict[key]"""
76 assert isinstance(key, str)
77 if value_type_name is None:
78 value_type_name = value_type.__name__
79 doc = "%s (%s)" % (key, value_type_name)
80 type_error_msg = "%s value must be %s" % (key, value_type_name)
81 attr_error_msg = "attribute %s not found" % key
82
83 def _get(self):
84 try:
85 value = self._dict[key]
86 except KeyError:
87 raise AttributeError(attr_error_msg) from None
88 if decode is not None:
89 value = decode(value)
90 return value
91
92 def _set(self, value):
93 if not isinstance(value, value_type):
94 raise TypeError(type_error_msg)
95 if encode is not None:
96 value = encode(value)
97 self._dict[key] = value
98
99 def _del(self):
100 try:
101 del self._dict[key]
102 except KeyError:
103 raise AttributeError(attr_error_msg) from None
104
105 return property(_get, _set, _del, doc=doc)
106
107
108 class Item(PropDict):
109 """
110 Item abstraction that deals with validation and the low-level details internally:
111
112 Items are created either from msgpack unpacker output, from another dict, from kwargs or
113 built step-by-step by setting attributes.
114
115 msgpack gives us a dict with bytes-typed keys, just give it to Item(d) and use item.key_name later.
116 msgpack gives us byte-typed values for stuff that should be str, we automatically decode when getting
117 such a property and encode when setting it.
118
119 If an Item shall be serialized, give as_dict() method output to msgpack packer.
120 """
121
122 VALID_KEYS = ITEM_KEYS | {'deleted', 'nlink', } # str-typed keys
123
124 __slots__ = ("_dict", ) # avoid setting attributes not supported by properties
125
126 # properties statically defined, so that IDEs can know their names:
127
128 path = PropDict._make_property('path', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode)
129 source = PropDict._make_property('source', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode)
130 user = PropDict._make_property('user', (str, type(None)), 'surrogate-escaped str or None', encode=safe_encode, decode=safe_decode)
131 group = PropDict._make_property('group', (str, type(None)), 'surrogate-escaped str or None', encode=safe_encode, decode=safe_decode)
132
133 acl_access = PropDict._make_property('acl_access', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode)
134 acl_default = PropDict._make_property('acl_default', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode)
135 acl_extended = PropDict._make_property('acl_extended', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode)
136 acl_nfs4 = PropDict._make_property('acl_nfs4', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode)
137
138 mode = PropDict._make_property('mode', int)
139 uid = PropDict._make_property('uid', int)
140 gid = PropDict._make_property('gid', int)
141 rdev = PropDict._make_property('rdev', int)
142 bsdflags = PropDict._make_property('bsdflags', int)
143
144 atime = PropDict._make_property('atime', int, 'bigint', encode=int_to_bigint, decode=bigint_to_int)
145 ctime = PropDict._make_property('ctime', int, 'bigint', encode=int_to_bigint, decode=bigint_to_int)
146 mtime = PropDict._make_property('mtime', int, 'bigint', encode=int_to_bigint, decode=bigint_to_int)
147
148 hardlink_master = PropDict._make_property('hardlink_master', bool)
149
150 chunks = PropDict._make_property('chunks', (list, type(None)), 'list or None')
151
152 xattrs = PropDict._make_property('xattrs', StableDict)
153
154 deleted = PropDict._make_property('deleted', bool)
155 nlink = PropDict._make_property('nlink', int)
156
157
158 class EncryptedKey(PropDict):
159 """
160 EncryptedKey abstraction that deals with validation and the low-level details internally:
161
162 A EncryptedKey is created either from msgpack unpacker output, from another dict, from kwargs or
163 built step-by-step by setting attributes.
164
165 msgpack gives us a dict with bytes-typed keys, just give it to EncryptedKey(d) and use enc_key.xxx later.
166
167 If a EncryptedKey shall be serialized, give as_dict() method output to msgpack packer.
168 """
169
170 VALID_KEYS = {'version', 'algorithm', 'iterations', 'salt', 'hash', 'data'} # str-typed keys
171
172 __slots__ = ("_dict", ) # avoid setting attributes not supported by properties
173
174 version = PropDict._make_property('version', int)
175 algorithm = PropDict._make_property('algorithm', str, encode=str.encode, decode=bytes.decode)
176 iterations = PropDict._make_property('iterations', int)
177 salt = PropDict._make_property('salt', bytes)
178 hash = PropDict._make_property('hash', bytes)
179 data = PropDict._make_property('data', bytes)
180
181
182 class Key(PropDict):
183 """
184 Key abstraction that deals with validation and the low-level details internally:
185
186 A Key is created either from msgpack unpacker output, from another dict, from kwargs or
187 built step-by-step by setting attributes.
188
189 msgpack gives us a dict with bytes-typed keys, just give it to Key(d) and use key.xxx later.
190
191 If a Key shall be serialized, give as_dict() method output to msgpack packer.
192 """
193
194 VALID_KEYS = {'version', 'repository_id', 'enc_key', 'enc_hmac_key', 'id_key', 'chunk_seed'} # str-typed keys
195
196 __slots__ = ("_dict", ) # avoid setting attributes not supported by properties
197
198 version = PropDict._make_property('version', int)
199 repository_id = PropDict._make_property('repository_id', bytes)
200 enc_key = PropDict._make_property('enc_key', bytes)
201 enc_hmac_key = PropDict._make_property('enc_hmac_key', bytes)
202 id_key = PropDict._make_property('id_key', bytes)
203 chunk_seed = PropDict._make_property('chunk_seed', int)
204
[end of src/borg/item.py]
</code>
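For orientation while reading the listing above, here is a minimal usage sketch of the Item abstraction it defines (illustrative only, not part of the repository):

# Sketch: properties validate types and encode/decode transparently.
item = Item()
item.path = 'some/file'        # stored surrogate-escaped via safe_encode()
item.mode = 0o100644           # plain int properties are stored as-is
item.uid = 1000
print(item.path)               # decoded back to str via safe_decode()
packed_form = item.as_dict()   # StableDict output suitable for msgpack packing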
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/borg/item.py b/src/borg/item.py
--- a/src/borg/item.py
+++ b/src/borg/item.py
@@ -130,10 +130,10 @@
user = PropDict._make_property('user', (str, type(None)), 'surrogate-escaped str or None', encode=safe_encode, decode=safe_decode)
group = PropDict._make_property('group', (str, type(None)), 'surrogate-escaped str or None', encode=safe_encode, decode=safe_decode)
- acl_access = PropDict._make_property('acl_access', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode)
- acl_default = PropDict._make_property('acl_default', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode)
- acl_extended = PropDict._make_property('acl_extended', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode)
- acl_nfs4 = PropDict._make_property('acl_nfs4', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode)
+ acl_access = PropDict._make_property('acl_access', bytes)
+ acl_default = PropDict._make_property('acl_default', bytes)
+ acl_extended = PropDict._make_property('acl_extended', bytes)
+ acl_nfs4 = PropDict._make_property('acl_nfs4', bytes)
mode = PropDict._make_property('mode', int)
uid = PropDict._make_property('uid', int)
| {"golden_diff": "diff --git a/src/borg/item.py b/src/borg/item.py\n--- a/src/borg/item.py\n+++ b/src/borg/item.py\n@@ -130,10 +130,10 @@\n user = PropDict._make_property('user', (str, type(None)), 'surrogate-escaped str or None', encode=safe_encode, decode=safe_decode)\n group = PropDict._make_property('group', (str, type(None)), 'surrogate-escaped str or None', encode=safe_encode, decode=safe_decode)\n \n- acl_access = PropDict._make_property('acl_access', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode)\n- acl_default = PropDict._make_property('acl_default', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode)\n- acl_extended = PropDict._make_property('acl_extended', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode)\n- acl_nfs4 = PropDict._make_property('acl_nfs4', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode)\n+ acl_access = PropDict._make_property('acl_access', bytes)\n+ acl_default = PropDict._make_property('acl_default', bytes)\n+ acl_extended = PropDict._make_property('acl_extended', bytes)\n+ acl_nfs4 = PropDict._make_property('acl_nfs4', bytes)\n \n mode = PropDict._make_property('mode', int)\n uid = PropDict._make_property('uid', int)\n", "issue": "TypeError: acl_access value must be surrogate-escaped str or None\n`acl_get` in `platform/linux.pyx` returns type of `bytes` for `acl_access`, `acl_default`, `acl_extended`, `acl_nfs4` on Linux x86_64. Those `bytes` are not converted to `str` afterwards.\n\nRelevant stacktrace:\n\n```\nFile \"/usr/lib/python3.5/site-packages/borg/archiver.py\", line 332, in _process\nstatus = archive.process_file(path, st, cache, self.ignore_inode)\nFile \"/usr/lib/python3.5/site-packages/borg/archive.py\", line 669, in process_file\nitem.update(self.stat_attrs(st, path))\nFile \"/usr/lib/python3.5/site-packages/borg/item.py\", line 40, in update\nsetattr(self, self._check_key(k), v)\nFile \"/usr/lib/python3.5/site-packages/borg/item.py\", line 95, in _set\nraise TypeError(type_error_msg)\nTypeError: acl_access value must be surrogate-escaped str or None\n```\n\n", "before_files": [{"content": "from .constants import ITEM_KEYS\nfrom .helpers import safe_encode, safe_decode\nfrom .helpers import bigint_to_int, int_to_bigint\nfrom .helpers import StableDict\n\n\nclass PropDict:\n \"\"\"\n Manage a dictionary via properties.\n\n - initialization by giving a dict or kw args\n - on initialization, normalize dict keys to be str type\n - access dict via properties, like: x.key_name\n - membership check via: 'key_name' in x\n - optionally, encode when setting a value\n - optionally, decode when getting a value\n - be safe against typos in key names: check against VALID_KEYS\n - when setting a value: check type of value\n \"\"\"\n VALID_KEYS = None # override with <set of str> in child class\n\n __slots__ = (\"_dict\", ) # avoid setting attributes not supported by properties\n\n def __init__(self, data_dict=None, internal_dict=None, **kw):\n if data_dict is None:\n data = kw\n elif not isinstance(data_dict, dict):\n raise TypeError(\"data_dict must be dict\")\n else:\n data = data_dict\n self._dict = {}\n self.update_internal(internal_dict or {})\n self.update(data)\n\n def update(self, d):\n for k, v in d.items():\n if isinstance(k, bytes):\n k = k.decode()\n setattr(self, self._check_key(k), v)\n\n def update_internal(self, d):\n for k, v in d.items():\n if isinstance(k, bytes):\n k = k.decode()\n self._dict[k] = v\n\n def __eq__(self, other):\n return self.as_dict() == 
other.as_dict()\n\n def __repr__(self):\n return '%s(internal_dict=%r)' % (self.__class__.__name__, self._dict)\n\n def as_dict(self):\n \"\"\"return the internal dictionary\"\"\"\n return StableDict(self._dict)\n\n def _check_key(self, key):\n \"\"\"make sure key is of type str and known\"\"\"\n if not isinstance(key, str):\n raise TypeError(\"key must be str\")\n if key not in self.VALID_KEYS:\n raise ValueError(\"key '%s' is not a valid key\" % key)\n return key\n\n def __contains__(self, key):\n \"\"\"do we have this key?\"\"\"\n return self._check_key(key) in self._dict\n\n def get(self, key, default=None):\n \"\"\"get value for key, return default if key does not exist\"\"\"\n return getattr(self, self._check_key(key), default)\n\n @staticmethod\n def _make_property(key, value_type, value_type_name=None, encode=None, decode=None):\n \"\"\"return a property that deals with self._dict[key]\"\"\"\n assert isinstance(key, str)\n if value_type_name is None:\n value_type_name = value_type.__name__\n doc = \"%s (%s)\" % (key, value_type_name)\n type_error_msg = \"%s value must be %s\" % (key, value_type_name)\n attr_error_msg = \"attribute %s not found\" % key\n\n def _get(self):\n try:\n value = self._dict[key]\n except KeyError:\n raise AttributeError(attr_error_msg) from None\n if decode is not None:\n value = decode(value)\n return value\n\n def _set(self, value):\n if not isinstance(value, value_type):\n raise TypeError(type_error_msg)\n if encode is not None:\n value = encode(value)\n self._dict[key] = value\n\n def _del(self):\n try:\n del self._dict[key]\n except KeyError:\n raise AttributeError(attr_error_msg) from None\n\n return property(_get, _set, _del, doc=doc)\n\n\nclass Item(PropDict):\n \"\"\"\n Item abstraction that deals with validation and the low-level details internally:\n\n Items are created either from msgpack unpacker output, from another dict, from kwargs or\n built step-by-step by setting attributes.\n\n msgpack gives us a dict with bytes-typed keys, just give it to Item(d) and use item.key_name later.\n msgpack gives us byte-typed values for stuff that should be str, we automatically decode when getting\n such a property and encode when setting it.\n\n If an Item shall be serialized, give as_dict() method output to msgpack packer.\n \"\"\"\n\n VALID_KEYS = ITEM_KEYS | {'deleted', 'nlink', } # str-typed keys\n\n __slots__ = (\"_dict\", ) # avoid setting attributes not supported by properties\n\n # properties statically defined, so that IDEs can know their names:\n\n path = PropDict._make_property('path', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode)\n source = PropDict._make_property('source', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode)\n user = PropDict._make_property('user', (str, type(None)), 'surrogate-escaped str or None', encode=safe_encode, decode=safe_decode)\n group = PropDict._make_property('group', (str, type(None)), 'surrogate-escaped str or None', encode=safe_encode, decode=safe_decode)\n\n acl_access = PropDict._make_property('acl_access', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode)\n acl_default = PropDict._make_property('acl_default', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode)\n acl_extended = PropDict._make_property('acl_extended', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode)\n acl_nfs4 = PropDict._make_property('acl_nfs4', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode)\n\n mode = 
PropDict._make_property('mode', int)\n uid = PropDict._make_property('uid', int)\n gid = PropDict._make_property('gid', int)\n rdev = PropDict._make_property('rdev', int)\n bsdflags = PropDict._make_property('bsdflags', int)\n\n atime = PropDict._make_property('atime', int, 'bigint', encode=int_to_bigint, decode=bigint_to_int)\n ctime = PropDict._make_property('ctime', int, 'bigint', encode=int_to_bigint, decode=bigint_to_int)\n mtime = PropDict._make_property('mtime', int, 'bigint', encode=int_to_bigint, decode=bigint_to_int)\n\n hardlink_master = PropDict._make_property('hardlink_master', bool)\n\n chunks = PropDict._make_property('chunks', (list, type(None)), 'list or None')\n\n xattrs = PropDict._make_property('xattrs', StableDict)\n\n deleted = PropDict._make_property('deleted', bool)\n nlink = PropDict._make_property('nlink', int)\n\n\nclass EncryptedKey(PropDict):\n \"\"\"\n EncryptedKey abstraction that deals with validation and the low-level details internally:\n\n A EncryptedKey is created either from msgpack unpacker output, from another dict, from kwargs or\n built step-by-step by setting attributes.\n\n msgpack gives us a dict with bytes-typed keys, just give it to EncryptedKey(d) and use enc_key.xxx later.\n\n If a EncryptedKey shall be serialized, give as_dict() method output to msgpack packer.\n \"\"\"\n\n VALID_KEYS = {'version', 'algorithm', 'iterations', 'salt', 'hash', 'data'} # str-typed keys\n\n __slots__ = (\"_dict\", ) # avoid setting attributes not supported by properties\n\n version = PropDict._make_property('version', int)\n algorithm = PropDict._make_property('algorithm', str, encode=str.encode, decode=bytes.decode)\n iterations = PropDict._make_property('iterations', int)\n salt = PropDict._make_property('salt', bytes)\n hash = PropDict._make_property('hash', bytes)\n data = PropDict._make_property('data', bytes)\n\n\nclass Key(PropDict):\n \"\"\"\n Key abstraction that deals with validation and the low-level details internally:\n\n A Key is created either from msgpack unpacker output, from another dict, from kwargs or\n built step-by-step by setting attributes.\n\n msgpack gives us a dict with bytes-typed keys, just give it to Key(d) and use key.xxx later.\n\n If a Key shall be serialized, give as_dict() method output to msgpack packer.\n \"\"\"\n\n VALID_KEYS = {'version', 'repository_id', 'enc_key', 'enc_hmac_key', 'id_key', 'chunk_seed'} # str-typed keys\n\n __slots__ = (\"_dict\", ) # avoid setting attributes not supported by properties\n\n version = PropDict._make_property('version', int)\n repository_id = PropDict._make_property('repository_id', bytes)\n enc_key = PropDict._make_property('enc_key', bytes)\n enc_hmac_key = PropDict._make_property('enc_hmac_key', bytes)\n id_key = PropDict._make_property('id_key', bytes)\n chunk_seed = PropDict._make_property('chunk_seed', int)\n", "path": "src/borg/item.py"}]} | 3,320 | 352 |
gh_patches_debug_17373 | rasdani/github-patches | git_diff | falconry__falcon-559 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Missing HTTP status codes
There are a few status codes from RFC 6585 (http://tools.ietf.org/html/rfc6585) that would be great to include.
429 Too Many Requests, for instance.
</issue>
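The change being requested is small: each missing code becomes a '<number> <reason phrase>' string constant plus a named alias, matching the pattern already used in the file, after which handlers can use it like any existing status string. A rough, hypothetical sketch (the resource and throttling rule below are invented purely for illustration):

import falcon

HTTP_429 = '429 Too Many Requests'  # the constant this issue asks falcon to ship

class ThrottledResource(object):
    """Hypothetical resource used only to illustrate the new status code."""

    def __init__(self, limit=100):
        self.limit = limit
        self.served = 0

    def on_get(self, req, resp):
        self.served += 1
        if self.served > self.limit:
            resp.status = HTTP_429
        else:
            resp.status = falcon.HTTP_200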
<code>
[start of falcon/status_codes.py]
1 # Copyright 2013 by Rackspace Hosting, Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15
16 HTTP_100 = '100 Continue'
17 HTTP_CONTINUE = HTTP_100
18 HTTP_101 = '101 Switching Protocols'
19 HTTP_SWITCHING_PROTOCOLS = HTTP_101
20
21 HTTP_200 = '200 OK'
22 HTTP_OK = HTTP_200
23 HTTP_201 = '201 Created'
24 HTTP_CREATED = HTTP_201
25 HTTP_202 = '202 Accepted'
26 HTTP_ACCEPTED = HTTP_202
27 HTTP_203 = '203 Non-Authoritative Information'
28 HTTP_NON_AUTHORITATIVE_INFORMATION = HTTP_203
29 HTTP_204 = '204 No Content'
30 HTTP_NO_CONTENT = HTTP_204
31 HTTP_205 = '205 Reset Content'
32 HTTP_RESET_CONTENT = HTTP_205
33 HTTP_206 = '206 Partial Content'
34 HTTP_PARTIAL_CONTENT = HTTP_206
35 HTTP_226 = '226 IM Used'
36 HTTP_IM_USED = HTTP_226
37
38 HTTP_300 = '300 Multiple Choices'
39 HTTP_MULTIPLE_CHOICES = HTTP_300
40 HTTP_301 = '301 Moved Permanently'
41 HTTP_MOVED_PERMANENTLY = HTTP_301
42 HTTP_302 = '302 Found'
43 HTTP_FOUND = HTTP_302
44 HTTP_303 = '303 See Other'
45 HTTP_SEE_OTHER = HTTP_303
46 HTTP_304 = '304 Not Modified'
47 HTTP_NOT_MODIFIED = HTTP_304
48 HTTP_305 = '305 Use Proxy'
49 HTTP_USE_PROXY = HTTP_305
50 HTTP_307 = '307 Temporary Redirect'
51 HTTP_TEMPORARY_REDIRECT = HTTP_307
52
53 HTTP_400 = '400 Bad Request'
54 HTTP_BAD_REQUEST = HTTP_400
55 HTTP_401 = '401 Unauthorized' # <-- Really means "unauthenticated"
56 HTTP_UNAUTHORIZED = HTTP_401
57 HTTP_402 = '402 Payment Required'
58 HTTP_PAYMENT_REQUIRED = HTTP_402
59 HTTP_403 = '403 Forbidden' # <-- Really means "unauthorized"
60 HTTP_FORBIDDEN = HTTP_403
61 HTTP_404 = '404 Not Found'
62 HTTP_NOT_FOUND = HTTP_404
63 HTTP_405 = '405 Method Not Allowed'
64 HTTP_METHOD_NOT_ALLOWED = HTTP_405
65 HTTP_406 = '406 Not Acceptable'
66 HTTP_NOT_ACCEPTABLE = HTTP_406
67 HTTP_407 = '407 Proxy Authentication Required'
68 HTTP_PROXY_AUTHENTICATION_REQUIRED = HTTP_407
69 HTTP_408 = '408 Request Time-out'
70 HTTP_REQUEST_TIMEOUT = HTTP_408
71 HTTP_409 = '409 Conflict'
72 HTTP_CONFLICT = HTTP_409
73 HTTP_410 = '410 Gone'
74 HTTP_GONE = HTTP_410
75 HTTP_411 = '411 Length Required'
76 HTTP_LENGTH_REQUIRED = HTTP_411
77 HTTP_412 = '412 Precondition Failed'
78 HTTP_PRECONDITION_FAILED = HTTP_412
79 HTTP_413 = '413 Payload Too Large'
80 HTTP_REQUEST_ENTITY_TOO_LARGE = HTTP_413
81 HTTP_414 = '414 URI Too Long'
82 HTTP_REQUEST_URI_TOO_LONG = HTTP_414
83 HTTP_415 = '415 Unsupported Media Type'
84 HTTP_UNSUPPORTED_MEDIA_TYPE = HTTP_415
85 HTTP_416 = '416 Range Not Satisfiable'
86 HTTP_REQUESTED_RANGE_NOT_SATISFIABLE = HTTP_416
87 HTTP_417 = '417 Expectation Failed'
88 HTTP_EXPECTATION_FAILED = HTTP_417
89 HTTP_418 = "418 I'm a teapot"
90 HTTP_IM_A_TEAPOT = HTTP_418
91 HTTP_426 = '426 Upgrade Required'
92 HTTP_UPGRADE_REQUIRED = HTTP_426
93
94 HTTP_500 = '500 Internal Server Error'
95 HTTP_INTERNAL_SERVER_ERROR = HTTP_500
96 HTTP_501 = '501 Not Implemented'
97 HTTP_NOT_IMPLEMENTED = HTTP_501
98 HTTP_502 = '502 Bad Gateway'
99 HTTP_BAD_GATEWAY = HTTP_502
100 HTTP_503 = '503 Service Unavailable'
101 HTTP_SERVICE_UNAVAILABLE = HTTP_503
102 HTTP_504 = '504 Gateway Time-out'
103 HTTP_GATEWAY_TIMEOUT = HTTP_504
104 HTTP_505 = '505 HTTP Version not supported'
105 HTTP_HTTP_VERSION_NOT_SUPPORTED = HTTP_505
106
107 # 70X - Inexcusable
108 HTTP_701 = '701 Meh'
109 HTTP_702 = '702 Emacs'
110 HTTP_703 = '703 Explosion'
111
112 # 71X - Novelty Implementations
113 HTTP_710 = '710 PHP'
114 HTTP_711 = '711 Convenience Store'
115 HTTP_712 = '712 NoSQL'
116 HTTP_719 = '719 I am not a teapot'
117
118 # 72X - Edge Cases
119 HTTP_720 = '720 Unpossible'
120 HTTP_721 = '721 Known Unknowns'
121 HTTP_722 = '722 Unknown Unknowns'
122 HTTP_723 = '723 Tricky'
123 HTTP_724 = '724 This line should be unreachable'
124 HTTP_725 = '725 It works on my machine'
125 HTTP_726 = "726 It's a feature, not a bug"
126 HTTP_727 = '727 32 bits is plenty'
127
128 # 74X - Meme Driven
129 HTTP_740 = '740 Computer says no'
130 HTTP_741 = '741 Compiling'
131 HTTP_742 = '742 A kitten dies'
132 HTTP_743 = '743 I thought I knew regular expressions'
133 HTTP_744 = '744 Y U NO write integration tests?'
134 HTTP_745 = ("745 I don't always test my code, but when I do"
135 "I do it in production")
136 HTTP_748 = '748 Confounded by Ponies'
137 HTTP_749 = '749 Reserved for Chuck Norris'
138
139 # 75X - Syntax Errors
140 HTTP_750 = "750 Didn't bother to compile it"
141 HTTP_753 = '753 Syntax Error'
142 HTTP_754 = '754 Too many semi-colons'
143 HTTP_755 = '755 Not enough semi-colons'
144 HTTP_759 = '759 Unexpected T_PAAMAYIM_NEKUDOTAYIM'
145
146 # 77X - Predictable Problems
147 HTTP_771 = '771 Cached for too long'
148 HTTP_772 = '772 Not cached long enough'
149 HTTP_773 = '773 Not cached at all'
150 HTTP_774 = '774 Why was this cached?'
151 HTTP_776 = '776 Error on the Exception'
152 HTTP_777 = '777 Coincidence'
153 HTTP_778 = '778 Off By One Error'
154 HTTP_779 = '779 Off By Too Many To Count Error'
155
156 # 78X - Somebody Else's Problem
157 HTTP_780 = '780 Project owner not responding'
158 HTTP_781 = '781 Operations'
159 HTTP_782 = '782 QA'
160 HTTP_783 = '783 It was a customer request, honestly'
161 HTTP_784 = '784 Management, obviously'
162 HTTP_785 = '785 TPS Cover Sheet not attached'
163 HTTP_786 = '786 Try it now'
164
165 # 79X - Internet crashed
166 HTTP_791 = '791 The Internet shut down due to copyright restrictions'
167 HTTP_792 = '792 Climate change driven catastrophic weather event'
168 HTTP_797 = '797 This is the last page of the Internet. Go back'
169 HTTP_799 = '799 End of the world'
170
[end of falcon/status_codes.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/falcon/status_codes.py b/falcon/status_codes.py
--- a/falcon/status_codes.py
+++ b/falcon/status_codes.py
@@ -90,6 +90,12 @@
HTTP_IM_A_TEAPOT = HTTP_418
HTTP_426 = '426 Upgrade Required'
HTTP_UPGRADE_REQUIRED = HTTP_426
+HTTP_428 = '428 Precondition Required'
+HTTP_PRECONDITION_REQUIRED = HTTP_428
+HTTP_429 = '429 Too Many Requests'
+HTTP_TOO_MANY_REQUESTS = HTTP_429
+HTTP_431 = '431 Request Header Fields Too Large'
+HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE = HTTP_431
HTTP_500 = '500 Internal Server Error'
HTTP_INTERNAL_SERVER_ERROR = HTTP_500
@@ -103,6 +109,8 @@
HTTP_GATEWAY_TIMEOUT = HTTP_504
HTTP_505 = '505 HTTP Version not supported'
HTTP_HTTP_VERSION_NOT_SUPPORTED = HTTP_505
+HTTP_511 = '511 Network Authentication Required'
+HTTP_NETWORK_AUTHENTICATION_REQUIRED = HTTP_511
# 70X - Inexcusable
HTTP_701 = '701 Meh'
| {"golden_diff": "diff --git a/falcon/status_codes.py b/falcon/status_codes.py\n--- a/falcon/status_codes.py\n+++ b/falcon/status_codes.py\n@@ -90,6 +90,12 @@\n HTTP_IM_A_TEAPOT = HTTP_418\n HTTP_426 = '426 Upgrade Required'\n HTTP_UPGRADE_REQUIRED = HTTP_426\n+HTTP_428 = '428 Precondition Required'\n+HTTP_PRECONDITION_REQUIRED = HTTP_428\n+HTTP_429 = '429 Too Many Requests'\n+HTTP_TOO_MANY_REQUESTS = HTTP_429\n+HTTP_431 = '431 Request Header Fields Too Large'\n+HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE = HTTP_431\n \n HTTP_500 = '500 Internal Server Error'\n HTTP_INTERNAL_SERVER_ERROR = HTTP_500\n@@ -103,6 +109,8 @@\n HTTP_GATEWAY_TIMEOUT = HTTP_504\n HTTP_505 = '505 HTTP Version not supported'\n HTTP_HTTP_VERSION_NOT_SUPPORTED = HTTP_505\n+HTTP_511 = '511 Network Authentication Required'\n+HTTP_NETWORK_AUTHENTICATION_REQUIRED = HTTP_511\n \n # 70X - Inexcusable\n HTTP_701 = '701 Meh'\n", "issue": "Missing HTTP status codes\nThere's a few status codes from here http://tools.ietf.org/html/rfc6585 that could be great including.\n\n429 Too many requests for instance.\n\n", "before_files": [{"content": "# Copyright 2013 by Rackspace Hosting, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nHTTP_100 = '100 Continue'\nHTTP_CONTINUE = HTTP_100\nHTTP_101 = '101 Switching Protocols'\nHTTP_SWITCHING_PROTOCOLS = HTTP_101\n\nHTTP_200 = '200 OK'\nHTTP_OK = HTTP_200\nHTTP_201 = '201 Created'\nHTTP_CREATED = HTTP_201\nHTTP_202 = '202 Accepted'\nHTTP_ACCEPTED = HTTP_202\nHTTP_203 = '203 Non-Authoritative Information'\nHTTP_NON_AUTHORITATIVE_INFORMATION = HTTP_203\nHTTP_204 = '204 No Content'\nHTTP_NO_CONTENT = HTTP_204\nHTTP_205 = '205 Reset Content'\nHTTP_RESET_CONTENT = HTTP_205\nHTTP_206 = '206 Partial Content'\nHTTP_PARTIAL_CONTENT = HTTP_206\nHTTP_226 = '226 IM Used'\nHTTP_IM_USED = HTTP_226\n\nHTTP_300 = '300 Multiple Choices'\nHTTP_MULTIPLE_CHOICES = HTTP_300\nHTTP_301 = '301 Moved Permanently'\nHTTP_MOVED_PERMANENTLY = HTTP_301\nHTTP_302 = '302 Found'\nHTTP_FOUND = HTTP_302\nHTTP_303 = '303 See Other'\nHTTP_SEE_OTHER = HTTP_303\nHTTP_304 = '304 Not Modified'\nHTTP_NOT_MODIFIED = HTTP_304\nHTTP_305 = '305 Use Proxy'\nHTTP_USE_PROXY = HTTP_305\nHTTP_307 = '307 Temporary Redirect'\nHTTP_TEMPORARY_REDIRECT = HTTP_307\n\nHTTP_400 = '400 Bad Request'\nHTTP_BAD_REQUEST = HTTP_400\nHTTP_401 = '401 Unauthorized' # <-- Really means \"unauthenticated\"\nHTTP_UNAUTHORIZED = HTTP_401\nHTTP_402 = '402 Payment Required'\nHTTP_PAYMENT_REQUIRED = HTTP_402\nHTTP_403 = '403 Forbidden' # <-- Really means \"unauthorized\"\nHTTP_FORBIDDEN = HTTP_403\nHTTP_404 = '404 Not Found'\nHTTP_NOT_FOUND = HTTP_404\nHTTP_405 = '405 Method Not Allowed'\nHTTP_METHOD_NOT_ALLOWED = HTTP_405\nHTTP_406 = '406 Not Acceptable'\nHTTP_NOT_ACCEPTABLE = HTTP_406\nHTTP_407 = '407 Proxy Authentication Required'\nHTTP_PROXY_AUTHENTICATION_REQUIRED = HTTP_407\nHTTP_408 = '408 Request Time-out'\nHTTP_REQUEST_TIMEOUT = HTTP_408\nHTTP_409 = '409 Conflict'\nHTTP_CONFLICT = HTTP_409\nHTTP_410 = '410 Gone'\nHTTP_GONE = HTTP_410\nHTTP_411 = '411 
Length Required'\nHTTP_LENGTH_REQUIRED = HTTP_411\nHTTP_412 = '412 Precondition Failed'\nHTTP_PRECONDITION_FAILED = HTTP_412\nHTTP_413 = '413 Payload Too Large'\nHTTP_REQUEST_ENTITY_TOO_LARGE = HTTP_413\nHTTP_414 = '414 URI Too Long'\nHTTP_REQUEST_URI_TOO_LONG = HTTP_414\nHTTP_415 = '415 Unsupported Media Type'\nHTTP_UNSUPPORTED_MEDIA_TYPE = HTTP_415\nHTTP_416 = '416 Range Not Satisfiable'\nHTTP_REQUESTED_RANGE_NOT_SATISFIABLE = HTTP_416\nHTTP_417 = '417 Expectation Failed'\nHTTP_EXPECTATION_FAILED = HTTP_417\nHTTP_418 = \"418 I'm a teapot\"\nHTTP_IM_A_TEAPOT = HTTP_418\nHTTP_426 = '426 Upgrade Required'\nHTTP_UPGRADE_REQUIRED = HTTP_426\n\nHTTP_500 = '500 Internal Server Error'\nHTTP_INTERNAL_SERVER_ERROR = HTTP_500\nHTTP_501 = '501 Not Implemented'\nHTTP_NOT_IMPLEMENTED = HTTP_501\nHTTP_502 = '502 Bad Gateway'\nHTTP_BAD_GATEWAY = HTTP_502\nHTTP_503 = '503 Service Unavailable'\nHTTP_SERVICE_UNAVAILABLE = HTTP_503\nHTTP_504 = '504 Gateway Time-out'\nHTTP_GATEWAY_TIMEOUT = HTTP_504\nHTTP_505 = '505 HTTP Version not supported'\nHTTP_HTTP_VERSION_NOT_SUPPORTED = HTTP_505\n\n# 70X - Inexcusable\nHTTP_701 = '701 Meh'\nHTTP_702 = '702 Emacs'\nHTTP_703 = '703 Explosion'\n\n# 71X - Novelty Implementations\nHTTP_710 = '710 PHP'\nHTTP_711 = '711 Convenience Store'\nHTTP_712 = '712 NoSQL'\nHTTP_719 = '719 I am not a teapot'\n\n# 72X - Edge Cases\nHTTP_720 = '720 Unpossible'\nHTTP_721 = '721 Known Unknowns'\nHTTP_722 = '722 Unknown Unknowns'\nHTTP_723 = '723 Tricky'\nHTTP_724 = '724 This line should be unreachable'\nHTTP_725 = '725 It works on my machine'\nHTTP_726 = \"726 It's a feature, not a bug\"\nHTTP_727 = '727 32 bits is plenty'\n\n# 74X - Meme Driven\nHTTP_740 = '740 Computer says no'\nHTTP_741 = '741 Compiling'\nHTTP_742 = '742 A kitten dies'\nHTTP_743 = '743 I thought I knew regular expressions'\nHTTP_744 = '744 Y U NO write integration tests?'\nHTTP_745 = (\"745 I don't always test my code, but when I do\"\n \"I do it in production\")\nHTTP_748 = '748 Confounded by Ponies'\nHTTP_749 = '749 Reserved for Chuck Norris'\n\n# 75X - Syntax Errors\nHTTP_750 = \"750 Didn't bother to compile it\"\nHTTP_753 = '753 Syntax Error'\nHTTP_754 = '754 Too many semi-colons'\nHTTP_755 = '755 Not enough semi-colons'\nHTTP_759 = '759 Unexpected T_PAAMAYIM_NEKUDOTAYIM'\n\n# 77X - Predictable Problems\nHTTP_771 = '771 Cached for too long'\nHTTP_772 = '772 Not cached long enough'\nHTTP_773 = '773 Not cached at all'\nHTTP_774 = '774 Why was this cached?'\nHTTP_776 = '776 Error on the Exception'\nHTTP_777 = '777 Coincidence'\nHTTP_778 = '778 Off By One Error'\nHTTP_779 = '779 Off By Too Many To Count Error'\n\n# 78X - Somebody Else's Problem\nHTTP_780 = '780 Project owner not responding'\nHTTP_781 = '781 Operations'\nHTTP_782 = '782 QA'\nHTTP_783 = '783 It was a customer request, honestly'\nHTTP_784 = '784 Management, obviously'\nHTTP_785 = '785 TPS Cover Sheet not attached'\nHTTP_786 = '786 Try it now'\n\n# 79X - Internet crashed\nHTTP_791 = '791 The Internet shut down due to copyright restrictions'\nHTTP_792 = '792 Climate change driven catastrophic weather event'\nHTTP_797 = '797 This is the last page of the Internet. Go back'\nHTTP_799 = '799 End of the world'\n", "path": "falcon/status_codes.py"}]} | 3,005 | 295 |
gh_patches_debug_17285 | rasdani/github-patches | git_diff | weecology__retriever-427 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add list of all available datasets to the documentation
It would be nice to have a page of the documentation that is an auto-generated list of all of the currently available datasets. Each item would have the text from the `name` attribute and be linked to the `url` or `ref` attribute if available.
</issue>
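Concretely, this could be a small generation step run at docs build time that walks the available dataset scripts and writes a reStructuredText page. A rough sketch under that assumption (the `name`, `ref` and `url` attributes are the ones mentioned above; everything else is invented for illustration):

def write_dataset_page(script_list, path="datasets.rst"):
    """Hypothetical generator: one reStructuredText entry per dataset."""
    lines = ["Datasets Available", "==================", ""]
    for num, script in enumerate(script_list, start=1):
        link = getattr(script, 'ref', '') or getattr(script, 'url', '')
        lines.append("| %d. **%s**" % (num, script.name))
        lines.append("| reference: %s" % link)
        lines.append("")
    with open(path, "w") as out:
        out.write("\n".join(lines))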
<code>
[start of docs/conf.py]
1 import sys, os
2 from retriever import VERSION,COPYRIGHT
3 needs_sphinx = '1.3'
4
5 # Add any Sphinx extension module names here, as strings.
6 extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon']
7
8 # Add any paths that contain templates here, relative to this directory.
9 templates_path = ['_templates']
10
11 # The suffix of source filenames.
12 source_suffix = '.rst'
13
14 # The encoding of source files.
15 # source_encoding = 'utf-8-sig'
16
17 # The master toctree document.
18 master_doc = 'index'
19
20 # General information about the project.
21 project = u'EcoData Retriever'
22 copyright = COPYRIGHT
23
24 version = release = VERSION
25
26 # List of patterns, relative to source directory, that match files and
27 # directories to ignore when looking for source files.
28 exclude_patterns = []
29
30 # The reST default role (used for this markup: `text`) to use for all documents.
31 #default_role = None
32
33 # If true, '()' will be appended to :func: etc. cross-reference text.
34 #add_function_parentheses = True
35
36 # If true, the current module name will be prepended to all description
37 # unit titles (such as .. function::).
38 #add_module_names = True
39
40 # If true, sectionauthor and moduleauthor directives will be shown in the
41 # output. They are ignored by default.
42 #show_authors = False
43
44 # The name of the Pygments (syntax highlighting) style to use.
45 pygments_style = 'sphinx'
46
47 # A list of ignored prefixes for module index sorting.
48 #modindex_common_prefix = []
49
50
51 # -- Options for HTML output ---------------------------------------------------
52
53 # The theme to use for HTML and HTML Help pages. See the documentation for
54 # a list of builtin themes.
55 html_theme = 'classic'
56
57 # Theme options are theme-specific and customize the look and feel of a theme
58 # further. For a list of options available for each theme, see the
59 # documentation.
60 #html_theme_options = {}
61
62 # Add any paths that contain custom themes here, relative to this directory.
63 #html_theme_path = []
64
65 # The name for this set of Sphinx documents. If None, it defaults to
66 # "<project> v<release> documentation".
67 #html_title = None
68
69 # A shorter title for the navigation bar. Default is the same as html_title.
70 #html_short_title = None
71
72 # The name of an image file (relative to this directory) to place at the top
73 # of the sidebar.
74 #html_logo = None
75
76 # The name of an image file (within the static path) to use as favicon of the
77 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
78 # pixels large.
79 #html_favicon = None
80
81 # Add any paths that contain custom static files (such as style sheets) here,
82 # relative to this directory. They are copied after the builtin static files,
83 # so a file named "default.css" will overwrite the builtin "default.css".
84
85
86 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
87 # using the given strftime format.
88 #html_last_updated_fmt = '%b %d, %Y'
89
90 # If true, SmartyPants will be used to convert quotes and dashes to
91 # typographically correct entities.
92 #html_use_smartypants = True
93
94 # Custom sidebar templates, maps document names to template names.
95 #html_sidebars = {}
96
97 # Additional templates that should be rendered to pages, maps page names to
98 # template names.
99 #html_additional_pages = {}
100
101 # If false, no module index is generated.
102 #html_domain_indices = True
103
104 # If false, no index is generated.
105 #html_use_index = True
106
107 # If true, the index is split into individual pages for each letter.
108 #html_split_index = False
109
110 # If true, links to the reST sources are added to the pages.
111 #html_show_sourcelink = True
112
113 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
114 #html_show_sphinx = True
115
116 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
117 #html_show_copyright = True
118
119 # If true, an OpenSearch description file will be output, and all pages will
120 # contain a <link> tag referring to it. The value of this option must be the
121 # base URL from which the finished HTML is served.
122 #html_use_opensearch = ''
123
124 # This is the file name suffix for HTML files (e.g. ".xhtml").
125 #html_file_suffix = None
126
127 # Output file base name for HTML help builder.
128
129
130
131 # -- Options for LaTeX output --------------------------------------------------
132
133 latex_elements = {
134 # The paper size ('letterpaper' or 'a4paper').
135 #'papersize': 'letterpaper',
136
137 # The font size ('10pt', '11pt' or '12pt').
138 #'pointsize': '10pt',
139
140 # Additional stuff for the LaTeX preamble.
141 #'preamble': '',
142 }
143
144 # Grouping the document tree into LaTeX files. List of tuples
145 # (source start file, target name, title, author, documentclass [howto/manual]).
146
147 # The name of an image file (relative to this directory) to place at the top of
148 # the title page.
149 #latex_logo = None
150
151 # For "manual" documents, if this is true, then toplevel headings are parts,
152 # not chapters.
153 #latex_use_parts = False
154
155 # If true, show page references after internal links.
156 #latex_show_pagerefs = False
157
158 # If true, show URL addresses after external links.
159 #latex_show_urls = False
160
161 # Documents to append as an appendix to all manuals.
162 #latex_appendices = []
163
164 # If false, no module index is generated.
165 #latex_domain_indices = True
166
167
168 # -- Options for manual page output --------------------------------------------
169
170 # One entry per manual page. List of tuples
171
172 # If true, show URL addresses after external links.
173 #man_show_urls = False
174
175
176 # -- Options for Texinfo output ------------------------------------------------
177
178 # Grouping the document tree into Texinfo files. List of tuples
179 # (source start file, target name, title, author,
180 # dir menu entry, description, category)
181
182
183 # Documents to append as an appendix to all manuals.
184 #texinfo_appendices = []
185
186 # If false, no module index is generated.
187 #texinfo_domain_indices = True
188
189 # How to display URL addresses: 'footnote', 'no', or 'inline'.
190 #texinfo_show_urls = 'footnote'
191
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -1,5 +1,33 @@
-import sys, os
from retriever import VERSION,COPYRIGHT
+from retriever.lib.repository import check_for_updates
+from retriever import SCRIPT_LIST
+
+# Create the .rst file for the available datasets
+datasetfile = open("datasets.rst", "w")
+datasetfile_title = """
+==================
+Datasets Available
+==================
+
+
+"""
+check_for_updates(graphical=False)
+script_list = SCRIPT_LIST()
+
+# write the title of dataset rst file
+datasetfile.write(datasetfile_title)
+
+# get info from the scripts
+for script_num, script in enumerate(script_list, start=1):
+ if script.ref.strip():
+ reference_link = script.ref
+ elif bool(script.urls.values()):
+ reference_link = script.urls.values()[0].rpartition('/')[0]
+ else:
+ reference_link = ""
+ datasetfile.write("| " + str(script_num) + ". **{}** \n| shortname: {}\n| reference: {}\n\n".format(script.name, script.shortname, reference_link))
+datasetfile.close()
+
needs_sphinx = '1.3'
# Add any Sphinx extension module names here, as strings.
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -1,5 +1,33 @@\n-import sys, os\n from retriever import VERSION,COPYRIGHT\n+from retriever.lib.repository import check_for_updates\n+from retriever import SCRIPT_LIST\n+\n+# Create the .rst file for the available datasets\n+datasetfile = open(\"datasets.rst\", \"w\")\n+datasetfile_title = \"\"\"\n+==================\n+Datasets Available\n+==================\n+\n+\n+\"\"\"\n+check_for_updates(graphical=False)\n+script_list = SCRIPT_LIST()\n+\n+# write the title of dataset rst file\n+datasetfile.write(datasetfile_title)\n+\n+# get info from the scripts\n+for script_num, script in enumerate(script_list, start=1):\n+ if script.ref.strip():\n+ reference_link = script.ref\n+ elif bool(script.urls.values()):\n+ reference_link = script.urls.values()[0].rpartition('/')[0]\n+ else:\n+ reference_link = \"\"\n+ datasetfile.write(\"| \" + str(script_num) + \". **{}** \\n| shortname: {}\\n| reference: {}\\n\\n\".format(script.name, script.shortname, reference_link))\n+datasetfile.close()\n+\n needs_sphinx = '1.3'\n \n # Add any Sphinx extension module names here, as strings.\n", "issue": "Add list of all available datasets to the documentation\nIt would be nice to have a page of the documentation that is an auto-generated list of all of the currently available datasets. Each item would have the text from the `name` attribute and be linked to the `url` or `ref` attribute if available.\n\n", "before_files": [{"content": "import sys, os\nfrom retriever import VERSION,COPYRIGHT\nneeds_sphinx = '1.3'\n\n# Add any Sphinx extension module names here, as strings.\nextensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon']\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n# source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'EcoData Retriever'\ncopyright = COPYRIGHT\n\nversion = release = VERSION\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = []\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'classic'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = []\n\n# The name for this set of Sphinx documents. 
If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n \n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Output file base name for HTML help builder.\n \n\n\n# -- Options for LaTeX output --------------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\n \n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. 
List of tuples\n \n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output ------------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\n \n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n", "path": "docs/conf.py"}]} | 2,495 | 290 |
gh_patches_debug_30250 | rasdani/github-patches | git_diff | MycroftAI__mycroft-core-231 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Alarm skill doesn't speak the date naturally
What I mean by this is that if you set an alarm for 5 minutes, it reads out the entire date (i.e. "set an alarm for Thursday, June 16th, at 04:05 pm") rather than saying the same thing in simpler terms (i.e. "set an alarm for 5 minutes"). And if the date is a day ahead, I think it would be best if it said "Tomorrow at 4:05pm".
</issue>
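What is being asked for is essentially relative-time phrasing: compute the difference between the alarm time and now, and only fall back to the full date when the alarm is further out. A rough sketch of that logic (thresholds and wording are assumptions for illustration, not Mycroft's implementation):

from datetime import datetime, timedelta

def describe(when, now=None):
    """Hypothetical helper: phrase an alarm time relative to now."""
    now = now or datetime.now()
    diff = when - now
    if diff < timedelta(hours=1):
        return "in %d minutes" % (diff.total_seconds() // 60)
    if diff < timedelta(days=1):
        hours, rem = divmod(int(diff.total_seconds()), 3600)
        return "in %d hours and %d minutes" % (hours, rem // 60)
    if when.date() == (now + timedelta(days=1)).date():
        return "tomorrow at " + when.strftime("%I:%M %p").lstrip("0")
    return when.strftime("%A, %B %d at %I:%M %p")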
<code>
[start of mycroft/skills/scheduled_skills.py]
1 # Copyright 2016 Mycroft AI, Inc.
2 #
3 # This file is part of Mycroft Core.
4 #
5 # Mycroft Core is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # Mycroft Core is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
17
18
19 import abc
20 from datetime import datetime
21 from threading import Timer, Lock
22 from time import mktime
23
24 import parsedatetime as pdt
25
26 from adapt.intent import IntentBuilder
27 from mycroft.skills import time_rules
28 from mycroft.skills.core import MycroftSkill
29
30 __author__ = 'jdorleans'
31
32
33 class ScheduledSkill(MycroftSkill):
34 """
35 Abstract class which provides a repeatable notification behaviour at a
36 specified time.
37
38 Skills implementation inherits this class when it needs to schedule a task
39 or a notification.
40 """
41
42 DELTA_TIME = int((datetime.now() - datetime.utcnow()).total_seconds())
43
44 def __init__(self, name, emitter=None):
45 super(ScheduledSkill, self).__init__(name, emitter)
46 self.timer = None
47 self.calendar = pdt.Calendar()
48 self.time_rules = time_rules.create(self.lang)
49
50 def schedule(self):
51 times = sorted(self.get_times())
52
53 if len(times) > 0:
54 self.cancel()
55 t = times[0]
56 now = self.get_utc_time()
57 delay = max(float(t) - now, 1)
58 self.timer = Timer(delay, self.notify, [t])
59 self.start()
60
61 def start(self):
62 if self.timer:
63 self.timer.start()
64
65 def cancel(self):
66 if self.timer:
67 self.timer.cancel()
68
69 def convert_local(self, utc_time):
70 return utc_time + self.DELTA_TIME
71
72 def get_utc_time(self, sentence=''):
73 return mktime(self.calendar.parse(sentence)[0]) - self.DELTA_TIME
74
75 def get_formatted_time(self, timestamp):
76 return datetime.fromtimestamp(timestamp).strftime(
77 self.config_core.get('time.format'))
78
79 @abc.abstractmethod
80 def get_times(self):
81 pass
82
83 @abc.abstractmethod
84 def notify(self, timestamp):
85 pass
86
87
88 class ScheduledCRUDSkill(ScheduledSkill):
89 """
90 Abstract CRUD class which provides a repeatable notification behaviour at
91 a specified time.
92
93 It registers CRUD intents and exposes its functions to manipulate a
94 provided ``data``
95
96 Skills implementation inherits this class when it needs to schedule a task
97 or a notification with a provided data
98 that can be manipulated by CRUD commands.
99
100 E.g. CRUD operations for a Reminder Skill
101 #. "Mycroft, list two reminders"
102 #. "Mycroft, list all reminders"
103 #. "Mycroft, delete one reminder"
104 #. "Mycroft, remind me to contribute to Mycroft project"
105 """
106
107 LOCK = Lock()
108 REPEAT_TASK = 'repeat'
109 PENDING_TASK = 'pending'
110 ONE_DAY_SECS = 86400
111
112 def __init__(self, name, emitter=None, basedir=None):
113 super(ScheduledCRUDSkill, self).__init__(name, emitter)
114 self.data = {}
115 self.repeat_data = {}
116 self.basedir = basedir
117
118 def initialize(self):
119 self.load_data()
120 self.load_repeat_data()
121 self.load_data_files(self.basedir)
122 self.register_regex("(?P<" + self.name + "Amount>\d+)")
123 self.register_intent(
124 self.build_intent_create().build(), self.handle_create)
125 self.register_intent(
126 self.build_intent_list().build(), self.handle_list)
127 self.register_intent(
128 self.build_intent_delete().build(), self.handle_delete)
129 self.schedule()
130
131 @abc.abstractmethod
132 def load_data(self):
133 pass
134
135 @abc.abstractmethod
136 def load_repeat_data(self):
137 pass
138
139 def build_intent_create(self):
140 return IntentBuilder(
141 self.name + 'CreateIntent').require(self.name + 'CreateVerb')
142
143 def build_intent_list(self):
144 return IntentBuilder(
145 self.name + 'ListIntent').require(self.name + 'ListVerb') \
146 .optionally(self.name + 'Amount').require(self.name + 'Keyword')
147
148 def build_intent_delete(self):
149 return IntentBuilder(
150 self.name + 'DeleteIntent').require(self.name + 'DeleteVerb') \
151 .optionally(self.name + 'Amount').require(self.name + 'Keyword')
152
153 def get_times(self):
154 return self.data.keys()
155
156 def handle_create(self, message):
157 utterance = message.metadata.get('utterance')
158 date = self.get_utc_time(utterance)
159 delay = date - self.get_utc_time()
160
161 if delay > 0:
162 self.feedback_create(date)
163 self.add_sync(date, message)
164 self.save_sync()
165 else:
166 self.speak_dialog('schedule.datetime.error')
167
168 def feedback_create(self, utc_time):
169 self.speak_dialog(
170 'schedule.create', data=self.build_feedback_payload(utc_time))
171
172 def add_sync(self, utc_time, message):
173 with self.LOCK:
174 self.add(utc_time, message)
175
176 def add(self, utc_time, message):
177 utterance = message.metadata.get('utterance')
178 self.data[utc_time] = None
179 self.repeat_data[utc_time] = self.time_rules.get_week_days(utterance)
180
181 def remove_sync(self, utc_time, add_next=True):
182 with self.LOCK:
183 val = self.remove(utc_time, add_next)
184 return val
185
186 def remove(self, utc_time, add_next=True):
187 value = self.data.pop(utc_time)
188 self.add_next_time(utc_time, value, add_next)
189 return value
190
191 def add_next_time(self, utc_time, value, add_next=True):
192 days = self.repeat_data.pop(utc_time)
193 if add_next and days:
194 now_time = self.get_utc_time()
195 next_time = utc_time + self.ONE_DAY_SECS
196 now_day = datetime.fromtimestamp(utc_time).weekday()
197 next_day = datetime.fromtimestamp(next_time).weekday()
198 while next_day != now_day:
199 if days[next_day] and next_time >= now_time:
200 self.data[next_time] = value
201 self.repeat_data[next_time] = days
202 break
203 next_time += self.ONE_DAY_SECS
204 next_day = datetime.fromtimestamp(next_time).weekday()
205
206 def save_sync(self):
207 with self.LOCK:
208 self.save()
209
210 @abc.abstractmethod
211 def save(self):
212 pass
213
214 def handle_list(self, message):
215 count = self.get_amount(message)
216 if count > 0:
217 for key in sorted(self.data.keys()):
218 if count > 0:
219 self.feedback_list(key)
220 count -= 1
221 else:
222 break
223 else:
224 self.speak_dialog('schedule.list.empty')
225
226 def feedback_list(self, utc_time):
227 self.speak_dialog(
228 'schedule.list', data=self.build_feedback_payload(utc_time))
229
230 def build_feedback_payload(self, utc_time):
231 timestamp = self.convert_local(float(utc_time))
232 payload = {
233 'data': self.data.get(utc_time),
234 'datetime': self.get_formatted_time(timestamp)
235 }
236 return payload
237
238 def handle_delete(self, message):
239 count = self.get_amount(message)
240 if count > 0:
241 amount = count
242 for key in sorted(self.data.keys()):
243 if count > 0:
244 self.remove_sync(key, False)
245 count -= 1
246 else:
247 break
248 self.feedback_delete(amount)
249 self.save_sync()
250 else:
251 self.speak_dialog('schedule.delete.empty')
252
253 def feedback_delete(self, amount):
254 if amount > 1:
255 self.speak_dialog('schedule.delete.many', data={'amount': amount})
256 else:
257 self.speak_dialog(
258 'schedule.delete.single', data={'amount': amount})
259
260 # TODO - Localization
261 def get_amount(self, message, default=None):
262 size = len(self.data)
263 amount = message.metadata.get(self.name + 'Amount', default)
264 if amount in ['all', 'my', 'all my', None]:
265 total = size
266 elif amount in ['one', 'the next', 'the following']:
267 total = 1
268 elif amount == 'two':
269 total = 2
270 else:
271 total = int(amount)
272 return min(total, size)
273
[end of mycroft/skills/scheduled_skills.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mycroft/skills/scheduled_skills.py b/mycroft/skills/scheduled_skills.py
--- a/mycroft/skills/scheduled_skills.py
+++ b/mycroft/skills/scheduled_skills.py
@@ -26,9 +26,12 @@
from adapt.intent import IntentBuilder
from mycroft.skills import time_rules
from mycroft.skills.core import MycroftSkill
+from mycroft.util.log import getLogger
__author__ = 'jdorleans'
+logger = getLogger(__name__)
+
class ScheduledSkill(MycroftSkill):
"""
@@ -40,6 +43,9 @@
"""
DELTA_TIME = int((datetime.now() - datetime.utcnow()).total_seconds())
+ SECONDS_PER_DAY = 86400
+ SECONDS_PER_HOUR = 3600
+ SECONDS_PER_MINUTE = 60
def __init__(self, name, emitter=None):
super(ScheduledSkill, self).__init__(name, emitter)
@@ -73,7 +79,19 @@
return mktime(self.calendar.parse(sentence)[0]) - self.DELTA_TIME
def get_formatted_time(self, timestamp):
- return datetime.fromtimestamp(timestamp).strftime(
+ date = datetime.fromtimestamp(timestamp)
+ now = datetime.now()
+ diff = (date - now).total_seconds()
+ if diff <= self.SECONDS_PER_DAY:
+ hours, remainder = divmod(diff, self.SECONDS_PER_HOUR)
+ minutes, seconds = divmod(remainder, self.SECONDS_PER_MINUTE)
+ if hours:
+ return "%s hours and %s minutes from now" % \
+ (int(hours), int(minutes))
+ else:
+ return "%s minutes and %s seconds from now" % \
+ (int(minutes), int(seconds))
+ return date.strftime(
self.config_core.get('time.format'))
@abc.abstractmethod
| {"golden_diff": "diff --git a/mycroft/skills/scheduled_skills.py b/mycroft/skills/scheduled_skills.py\n--- a/mycroft/skills/scheduled_skills.py\n+++ b/mycroft/skills/scheduled_skills.py\n@@ -26,9 +26,12 @@\n from adapt.intent import IntentBuilder\n from mycroft.skills import time_rules\n from mycroft.skills.core import MycroftSkill\n+from mycroft.util.log import getLogger\n \n __author__ = 'jdorleans'\n \n+logger = getLogger(__name__)\n+\n \n class ScheduledSkill(MycroftSkill):\n \"\"\"\n@@ -40,6 +43,9 @@\n \"\"\"\n \n DELTA_TIME = int((datetime.now() - datetime.utcnow()).total_seconds())\n+ SECONDS_PER_DAY = 86400\n+ SECONDS_PER_HOUR = 3600\n+ SECONDS_PER_MINUTE = 60\n \n def __init__(self, name, emitter=None):\n super(ScheduledSkill, self).__init__(name, emitter)\n@@ -73,7 +79,19 @@\n return mktime(self.calendar.parse(sentence)[0]) - self.DELTA_TIME\n \n def get_formatted_time(self, timestamp):\n- return datetime.fromtimestamp(timestamp).strftime(\n+ date = datetime.fromtimestamp(timestamp)\n+ now = datetime.now()\n+ diff = (date - now).total_seconds()\n+ if diff <= self.SECONDS_PER_DAY:\n+ hours, remainder = divmod(diff, self.SECONDS_PER_HOUR)\n+ minutes, seconds = divmod(remainder, self.SECONDS_PER_MINUTE)\n+ if hours:\n+ return \"%s hours and %s minutes from now\" % \\\n+ (int(hours), int(minutes))\n+ else:\n+ return \"%s minutes and %s seconds from now\" % \\\n+ (int(minutes), int(seconds))\n+ return date.strftime(\n self.config_core.get('time.format'))\n \n @abc.abstractmethod\n", "issue": "Alarm skill doesn't speak the date naturally\nWhat I mean by this is if setting an alarm for 5 minutes, it reads the entire date (ie. \"set an alarm for Thrusday, June 16th, at 04:05 pm\") rather than simply saying the same thing in simpler terms (ie. \"set an alarm for 5 minues\"). And if the date is a day ahead I think it would be best that it says \"Tomorrow at 4:05pm\".\n\n", "before_files": [{"content": "# Copyright 2016 Mycroft AI, Inc.\n#\n# This file is part of Mycroft Core.\n#\n# Mycroft Core is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Mycroft Core is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Mycroft Core. 
If not, see <http://www.gnu.org/licenses/>.\n\n\nimport abc\nfrom datetime import datetime\nfrom threading import Timer, Lock\nfrom time import mktime\n\nimport parsedatetime as pdt\n\nfrom adapt.intent import IntentBuilder\nfrom mycroft.skills import time_rules\nfrom mycroft.skills.core import MycroftSkill\n\n__author__ = 'jdorleans'\n\n\nclass ScheduledSkill(MycroftSkill):\n \"\"\"\n Abstract class which provides a repeatable notification behaviour at a\n specified time.\n\n Skills implementation inherits this class when it needs to schedule a task\n or a notification.\n \"\"\"\n\n DELTA_TIME = int((datetime.now() - datetime.utcnow()).total_seconds())\n\n def __init__(self, name, emitter=None):\n super(ScheduledSkill, self).__init__(name, emitter)\n self.timer = None\n self.calendar = pdt.Calendar()\n self.time_rules = time_rules.create(self.lang)\n\n def schedule(self):\n times = sorted(self.get_times())\n\n if len(times) > 0:\n self.cancel()\n t = times[0]\n now = self.get_utc_time()\n delay = max(float(t) - now, 1)\n self.timer = Timer(delay, self.notify, [t])\n self.start()\n\n def start(self):\n if self.timer:\n self.timer.start()\n\n def cancel(self):\n if self.timer:\n self.timer.cancel()\n\n def convert_local(self, utc_time):\n return utc_time + self.DELTA_TIME\n\n def get_utc_time(self, sentence=''):\n return mktime(self.calendar.parse(sentence)[0]) - self.DELTA_TIME\n\n def get_formatted_time(self, timestamp):\n return datetime.fromtimestamp(timestamp).strftime(\n self.config_core.get('time.format'))\n\n @abc.abstractmethod\n def get_times(self):\n pass\n\n @abc.abstractmethod\n def notify(self, timestamp):\n pass\n\n\nclass ScheduledCRUDSkill(ScheduledSkill):\n \"\"\"\n Abstract CRUD class which provides a repeatable notification behaviour at\n a specified time.\n\n It registers CRUD intents and exposes its functions to manipulate a\n provided ``data``\n\n Skills implementation inherits this class when it needs to schedule a task\n or a notification with a provided data\n that can be manipulated by CRUD commands.\n\n E.g. CRUD operations for a Reminder Skill\n #. \"Mycroft, list two reminders\"\n #. \"Mycroft, list all reminders\"\n #. \"Mycroft, delete one reminder\"\n #. 
\"Mycroft, remind me to contribute to Mycroft project\"\n \"\"\"\n\n LOCK = Lock()\n REPEAT_TASK = 'repeat'\n PENDING_TASK = 'pending'\n ONE_DAY_SECS = 86400\n\n def __init__(self, name, emitter=None, basedir=None):\n super(ScheduledCRUDSkill, self).__init__(name, emitter)\n self.data = {}\n self.repeat_data = {}\n self.basedir = basedir\n\n def initialize(self):\n self.load_data()\n self.load_repeat_data()\n self.load_data_files(self.basedir)\n self.register_regex(\"(?P<\" + self.name + \"Amount>\\d+)\")\n self.register_intent(\n self.build_intent_create().build(), self.handle_create)\n self.register_intent(\n self.build_intent_list().build(), self.handle_list)\n self.register_intent(\n self.build_intent_delete().build(), self.handle_delete)\n self.schedule()\n\n @abc.abstractmethod\n def load_data(self):\n pass\n\n @abc.abstractmethod\n def load_repeat_data(self):\n pass\n\n def build_intent_create(self):\n return IntentBuilder(\n self.name + 'CreateIntent').require(self.name + 'CreateVerb')\n\n def build_intent_list(self):\n return IntentBuilder(\n self.name + 'ListIntent').require(self.name + 'ListVerb') \\\n .optionally(self.name + 'Amount').require(self.name + 'Keyword')\n\n def build_intent_delete(self):\n return IntentBuilder(\n self.name + 'DeleteIntent').require(self.name + 'DeleteVerb') \\\n .optionally(self.name + 'Amount').require(self.name + 'Keyword')\n\n def get_times(self):\n return self.data.keys()\n\n def handle_create(self, message):\n utterance = message.metadata.get('utterance')\n date = self.get_utc_time(utterance)\n delay = date - self.get_utc_time()\n\n if delay > 0:\n self.feedback_create(date)\n self.add_sync(date, message)\n self.save_sync()\n else:\n self.speak_dialog('schedule.datetime.error')\n\n def feedback_create(self, utc_time):\n self.speak_dialog(\n 'schedule.create', data=self.build_feedback_payload(utc_time))\n\n def add_sync(self, utc_time, message):\n with self.LOCK:\n self.add(utc_time, message)\n\n def add(self, utc_time, message):\n utterance = message.metadata.get('utterance')\n self.data[utc_time] = None\n self.repeat_data[utc_time] = self.time_rules.get_week_days(utterance)\n\n def remove_sync(self, utc_time, add_next=True):\n with self.LOCK:\n val = self.remove(utc_time, add_next)\n return val\n\n def remove(self, utc_time, add_next=True):\n value = self.data.pop(utc_time)\n self.add_next_time(utc_time, value, add_next)\n return value\n\n def add_next_time(self, utc_time, value, add_next=True):\n days = self.repeat_data.pop(utc_time)\n if add_next and days:\n now_time = self.get_utc_time()\n next_time = utc_time + self.ONE_DAY_SECS\n now_day = datetime.fromtimestamp(utc_time).weekday()\n next_day = datetime.fromtimestamp(next_time).weekday()\n while next_day != now_day:\n if days[next_day] and next_time >= now_time:\n self.data[next_time] = value\n self.repeat_data[next_time] = days\n break\n next_time += self.ONE_DAY_SECS\n next_day = datetime.fromtimestamp(next_time).weekday()\n\n def save_sync(self):\n with self.LOCK:\n self.save()\n\n @abc.abstractmethod\n def save(self):\n pass\n\n def handle_list(self, message):\n count = self.get_amount(message)\n if count > 0:\n for key in sorted(self.data.keys()):\n if count > 0:\n self.feedback_list(key)\n count -= 1\n else:\n break\n else:\n self.speak_dialog('schedule.list.empty')\n\n def feedback_list(self, utc_time):\n self.speak_dialog(\n 'schedule.list', data=self.build_feedback_payload(utc_time))\n\n def build_feedback_payload(self, utc_time):\n timestamp = 
self.convert_local(float(utc_time))\n payload = {\n 'data': self.data.get(utc_time),\n 'datetime': self.get_formatted_time(timestamp)\n }\n return payload\n\n def handle_delete(self, message):\n count = self.get_amount(message)\n if count > 0:\n amount = count\n for key in sorted(self.data.keys()):\n if count > 0:\n self.remove_sync(key, False)\n count -= 1\n else:\n break\n self.feedback_delete(amount)\n self.save_sync()\n else:\n self.speak_dialog('schedule.delete.empty')\n\n def feedback_delete(self, amount):\n if amount > 1:\n self.speak_dialog('schedule.delete.many', data={'amount': amount})\n else:\n self.speak_dialog(\n 'schedule.delete.single', data={'amount': amount})\n\n # TODO - Localization\n def get_amount(self, message, default=None):\n size = len(self.data)\n amount = message.metadata.get(self.name + 'Amount', default)\n if amount in ['all', 'my', 'all my', None]:\n total = size\n elif amount in ['one', 'the next', 'the following']:\n total = 1\n elif amount == 'two':\n total = 2\n else:\n total = int(amount)\n return min(total, size)\n", "path": "mycroft/skills/scheduled_skills.py"}]} | 3,292 | 416 |
gh_patches_debug_33517 | rasdani/github-patches | git_diff | UTNkar__moore-65 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Check registry on account creation
The registry is currently not checked for membership status when an account is created.
</issue>
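Reading the issue against the code that follows: `Member.update_status()` already queries the registry, and a `pre_save` signal re-checks members, but only when their status was last changed more than a day earlier, so a freshly created account (status `'unknown'`) never gets checked. Below is a minimal sketch of one way to close that gap; it is an illustration in the spirit of the patch shown further down, not a drop-in replacement for it (the full fix also touches the cron job and the API error handling).

```python
import datetime

from django.db.models.signals import pre_save
from django.dispatch import receiver
from django.utils import timezone

from members.models import Member


@receiver(pre_save, sender=Member, dispatch_uid='member_check_membership')
def check_membership(sender, instance, **kwargs):
    # Re-check stale members as before, but also hit the registry for
    # accounts whose membership status has never been resolved, i.e. the
    # 'unknown' default that new registrations start with.
    stale = timezone.now() - instance.status_changed > datetime.timedelta(1)
    if stale or instance.status == 'unknown':
        instance.update_status()
```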
<code>
[start of website/members/cron.py]
1 import kronos
2 import requests
3 from django.conf import settings
4 from requests.auth import HTTPDigestAuth
5
6 from members.models import Member
7
8
9 @kronos.register('0 3 * * 1') # At 03:00 on Monday.
10 def update_membership_status():
11 r = requests.get(
12 'https://register.utn.se/api.php',
13 auth=HTTPDigestAuth(settings.MEMBERSHIP_API_USER,
14 settings.MEMBERSHIP_API_PASSWORD),
15 params={
16 'action': 'list',
17 },
18 )
19 try:
20 data = r.json()
21 except ValueError:
22 return
23
24 for member in Member.objects.all():
25 if member.person_number().replace('-', '') in data:
26 member.update(data='member')
27 else:
28 member.update_status(data='nonmember')
29 member.save()
30
[end of website/members/cron.py]
[start of website/members/signals.py]
1 import datetime
2 from django.conf import settings
3 from django.core.mail import EmailMultiAlternatives
4 from django.db.models.signals import pre_save
5 from django.dispatch import receiver
6 from django.template import loader
7 from django.utils import timezone
8 from simple_email_confirmation import unconfirmed_email_created
9
10 from members.models import Member
11
12
13 @receiver(unconfirmed_email_created, dispatch_uid='send_email_confirmation')
14 def send_confirmation_email(sender, email, user=None, **kwargs):
15 user = user or sender
16 if user is not None:
17 context = {
18 'email': email,
19 'domain': settings.BASE_URL,
20 'site_name': settings.WAGTAIL_SITE_NAME,
21 'token': user.get_confirmation_key(email),
22 }
23
24 subject = loader.render_to_string(
25 'members/email_change_subject.txt', context)
26 # Email subject *must not* contain newlines
27 subject = ''.join(subject.splitlines())
28 body = loader.render_to_string('members/email_change_email.html',
29 context)
30
31 email_message = EmailMultiAlternatives(subject, body, None, [email])
32 email_message.send()
33
34
35 @receiver(pre_save, sender=Member, dispatch_uid='member_check_membership')
36 def check_membership(sender, instance, **kwargs):
37 if timezone.now() - instance.status_changed > datetime.timedelta(1):
38 instance.update_status()
39
[end of website/members/signals.py]
[start of website/members/models.py]
1 import requests
2 from django.conf import settings
3 from django.contrib.auth.models import AbstractUser
4 from django.core import validators
5 from django.db import models
6 from django.utils import timezone
7 from django.utils.translation import ugettext_lazy as _
8 from requests.auth import HTTPDigestAuth
9 from simple_email_confirmation.models import SimpleEmailConfirmationUserMixin
10
11 from utils.translation import TranslatedField
12
13
14 class StudyProgram(models.Model):
15 """This class describes a university study program"""
16
17 class Meta:
18 verbose_name = _('study program')
19 verbose_name_plural = _('study programs')
20
21 DEGREE_CHOICES = (
22 ('bachelor', _('Bachelor\'s Degree')),
23 ('master', _('Master\'s Degree')),
24 ('engineer', _('Engineer\'s Degree')),
25 )
26
27 name_en = models.CharField(
28 max_length=255,
29 verbose_name=_('English program name'),
30 help_text=_('Enter the name of the study program'),
31 null=False,
32 blank=False,
33 )
34
35 name_sv = models.CharField(
36 max_length=255,
37 verbose_name=_('Swedish program name'),
38 help_text=_('Enter the name of the study program'),
39 null=False,
40 blank=False,
41 )
42
43 name = TranslatedField('name_en', 'name_sv')
44
45 abbreviation_en = models.CharField(
46 max_length=130,
47 verbose_name=_('English program abbreviation'),
48 help_text=_('Enter the abbreviation for the study program'),
49 null=True,
50 blank=True,
51 )
52
53 abbreviation_sv = models.CharField(
54 max_length=130,
55 verbose_name=_('Swedish program abbreviation'),
56 help_text=_('Enter the abbreviation for the study program'),
57 null=True,
58 blank=True,
59 )
60
61 abbreviation = TranslatedField('name_en', 'name_sv')
62
63 degree = models.CharField(
64 max_length=20,
65 choices=DEGREE_CHOICES,
66 verbose_name=_('Degree type'),
67 blank=False,
68 null=False,
69 )
70
71 def __str__(self) -> str:
72 return '%s in %s' % (self.get_degree_display(), self.name)
73
74
75 class Member(SimpleEmailConfirmationUserMixin, AbstractUser):
76 """This class describes a member"""
77
78 # ---- Personal information ------
79
80 birthday = models.DateField(
81 verbose_name=_('Birthday'),
82 null=True
83 )
84
85 person_number_ext = models.CharField(
86 max_length=4,
87 verbose_name=_('Person number extension'),
88 help_text=_('Enter the last four digits of your Swedish person '
89 'number, given by the Swedish tax authority'),
90 validators=[validators.RegexValidator(
91 regex=r'^\d{4}$',
92 message=_('The person number extension consists of four numbers'),
93 )],
94 unique_for_date="birthday",
95 null=True,
96 blank=True,
97 )
98
99 # ---- Membership information ------
100
101 MEMBERSHIP_CHOICES = (
102 ('unknown', _('Unknown')),
103 ('nonmember', _('Nonmember')),
104 ('member', _('Member')),
105 ('alumnus', _('Alumnus')),
106 )
107
108 status = models.CharField(
109 max_length=20,
110 choices=MEMBERSHIP_CHOICES,
111 verbose_name=_('Membership status'),
112 blank=False,
113 null=False,
114 default='unknown'
115 )
116 status_changed = models.DateTimeField(
117 default=timezone.now,
118 null=False,
119 )
120
121 # ---- Contact information ------
122
123 phone_number = models.CharField(
124 max_length=20,
125 verbose_name=_('Phone number'),
126 help_text=_('Enter a phone number so UTN may reach you'),
127 validators=[validators.RegexValidator(
128 regex=r'^\+?\d+$',
129 message=_('Please enter a valid phone number'),
130 )],
131 null=True,
132 blank=True,
133 )
134
135 # ---- University information ------
136
137 registration_year = models.CharField(
138 max_length=4,
139 verbose_name=_('Registration year'),
140 help_text=_('Enter the year you started studying at the TakNat '
141 'faculty'),
142 validators=[validators.RegexValidator(
143 regex=r'^\d{4}$',
144 message=_('Please enter a valid year')
145 )],
146 null=True,
147 blank=True,
148 )
149
150 study = models.ForeignKey(
151 StudyProgram,
152 on_delete=models.SET_NULL,
153 null=True,
154 blank=True,
155 )
156
157 def __str__(self) -> str:
158 if self.first_name and self.last_name:
159 return '%s %s' % (self.first_name, self.last_name)
160 else:
161 return self.username
162
163 def person_number(self) -> str:
164 if self.birthday is None or self.person_number_ext is None:
165 return ''
166 else:
167 return '%s-%s' % (self.birthday.strftime('%Y%m%d'),
168 self.person_number_ext)
169
170 def update_status(self, data=None):
171 if data is None:
172 r = requests.get(
173 'https://register.utn.se/api.php',
174 auth=HTTPDigestAuth(settings.MEMBERSHIP_API_USER,
175 settings.MEMBERSHIP_API_PASSWORD),
176 params={
177 'action': 'check',
178 'person_number': self.person_number().replace('-', '')
179 },
180 )
181 try:
182 data = r.json().get('status')
183 except ValueError:
184 return
185
186 if data == 'member':
187 self.status = 'member'
188 elif data == 'nonmember':
189 if self.status in ['unknown', 'nonmember']:
190 self.status = 'nonmember'
191 else:
192 self.status = 'alumnus'
193
194 self.status_changed = timezone.now()
195
196 def remove_old_email(self):
197 for email in self.get_unconfirmed_emails() or []:
198 self.remove_email(email)
199 for email in self.get_confirmed_emails():
200 if email != self.email:
201 self.remove_email(email)
202
[end of website/members/models.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/members/cron.py b/website/members/cron.py
--- a/website/members/cron.py
+++ b/website/members/cron.py
@@ -23,7 +23,9 @@
for member in Member.objects.all():
if member.person_number().replace('-', '') in data:
- member.update(data='member')
+ member.update_status(data='member')
else:
member.update_status(data='nonmember')
- member.save()
+ Member.objects.filter(pk=member.pk).update(
+ status=member.status, status_changed=member.status_changed
+ )
diff --git a/website/members/models.py b/website/members/models.py
--- a/website/members/models.py
+++ b/website/members/models.py
@@ -169,17 +169,21 @@
def update_status(self, data=None):
if data is None:
- r = requests.get(
- 'https://register.utn.se/api.php',
- auth=HTTPDigestAuth(settings.MEMBERSHIP_API_USER,
- settings.MEMBERSHIP_API_PASSWORD),
- params={
- 'action': 'check',
- 'person_number': self.person_number().replace('-', '')
- },
- )
+ if self.person_number() == '':
+ return
try:
+ r = requests.get(
+ 'https://register.utn.se/api.php',
+ auth=HTTPDigestAuth(settings.MEMBERSHIP_API_USER,
+ settings.MEMBERSHIP_API_PASSWORD),
+ params={
+ 'action': 'check',
+ 'person_number': self.person_number().replace('-', '')
+ },
+ )
data = r.json().get('status')
+ except requests.exceptions.ConnectionError:
+ data = 'unknown'
except ValueError:
return
diff --git a/website/members/signals.py b/website/members/signals.py
--- a/website/members/signals.py
+++ b/website/members/signals.py
@@ -34,5 +34,6 @@
@receiver(pre_save, sender=Member, dispatch_uid='member_check_membership')
def check_membership(sender, instance, **kwargs):
- if timezone.now() - instance.status_changed > datetime.timedelta(1):
+ if timezone.now() - instance.status_changed > datetime.timedelta(1)\
+ or instance.status == 'unknown':
instance.update_status()
| {"golden_diff": "diff --git a/website/members/cron.py b/website/members/cron.py\n--- a/website/members/cron.py\n+++ b/website/members/cron.py\n@@ -23,7 +23,9 @@\n \n for member in Member.objects.all():\n if member.person_number().replace('-', '') in data:\n- member.update(data='member')\n+ member.update_status(data='member')\n else:\n member.update_status(data='nonmember')\n- member.save()\n+ Member.objects.filter(pk=member.pk).update(\n+ status=member.status, status_changed=member.status_changed\n+ )\ndiff --git a/website/members/models.py b/website/members/models.py\n--- a/website/members/models.py\n+++ b/website/members/models.py\n@@ -169,17 +169,21 @@\n \n def update_status(self, data=None):\n if data is None:\n- r = requests.get(\n- 'https://register.utn.se/api.php',\n- auth=HTTPDigestAuth(settings.MEMBERSHIP_API_USER,\n- settings.MEMBERSHIP_API_PASSWORD),\n- params={\n- 'action': 'check',\n- 'person_number': self.person_number().replace('-', '')\n- },\n- )\n+ if self.person_number() == '':\n+ return\n try:\n+ r = requests.get(\n+ 'https://register.utn.se/api.php',\n+ auth=HTTPDigestAuth(settings.MEMBERSHIP_API_USER,\n+ settings.MEMBERSHIP_API_PASSWORD),\n+ params={\n+ 'action': 'check',\n+ 'person_number': self.person_number().replace('-', '')\n+ },\n+ )\n data = r.json().get('status')\n+ except requests.exceptions.ConnectionError:\n+ data = 'unknown'\n except ValueError:\n return\n \ndiff --git a/website/members/signals.py b/website/members/signals.py\n--- a/website/members/signals.py\n+++ b/website/members/signals.py\n@@ -34,5 +34,6 @@\n \n @receiver(pre_save, sender=Member, dispatch_uid='member_check_membership')\n def check_membership(sender, instance, **kwargs):\n- if timezone.now() - instance.status_changed > datetime.timedelta(1):\n+ if timezone.now() - instance.status_changed > datetime.timedelta(1)\\\n+ or instance.status == 'unknown':\n instance.update_status()\n", "issue": "Check registry on account creation\nThe registry is currently not checked for membership status when an account is created.\n", "before_files": [{"content": "import kronos\nimport requests\nfrom django.conf import settings\nfrom requests.auth import HTTPDigestAuth\n\nfrom members.models import Member\n\n\[email protected]('0 3 * * 1') # At 03:00 on Monday.\ndef update_membership_status():\n r = requests.get(\n 'https://register.utn.se/api.php',\n auth=HTTPDigestAuth(settings.MEMBERSHIP_API_USER,\n settings.MEMBERSHIP_API_PASSWORD),\n params={\n 'action': 'list',\n },\n )\n try:\n data = r.json()\n except ValueError:\n return\n\n for member in Member.objects.all():\n if member.person_number().replace('-', '') in data:\n member.update(data='member')\n else:\n member.update_status(data='nonmember')\n member.save()\n", "path": "website/members/cron.py"}, {"content": "import datetime\nfrom django.conf import settings\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.db.models.signals import pre_save\nfrom django.dispatch import receiver\nfrom django.template import loader\nfrom django.utils import timezone\nfrom simple_email_confirmation import unconfirmed_email_created\n\nfrom members.models import Member\n\n\n@receiver(unconfirmed_email_created, dispatch_uid='send_email_confirmation')\ndef send_confirmation_email(sender, email, user=None, **kwargs):\n user = user or sender\n if user is not None:\n context = {\n 'email': email,\n 'domain': settings.BASE_URL,\n 'site_name': settings.WAGTAIL_SITE_NAME,\n 'token': user.get_confirmation_key(email),\n }\n\n subject = loader.render_to_string(\n 
'members/email_change_subject.txt', context)\n # Email subject *must not* contain newlines\n subject = ''.join(subject.splitlines())\n body = loader.render_to_string('members/email_change_email.html',\n context)\n\n email_message = EmailMultiAlternatives(subject, body, None, [email])\n email_message.send()\n\n\n@receiver(pre_save, sender=Member, dispatch_uid='member_check_membership')\ndef check_membership(sender, instance, **kwargs):\n if timezone.now() - instance.status_changed > datetime.timedelta(1):\n instance.update_status()\n", "path": "website/members/signals.py"}, {"content": "import requests\nfrom django.conf import settings\nfrom django.contrib.auth.models import AbstractUser\nfrom django.core import validators\nfrom django.db import models\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext_lazy as _\nfrom requests.auth import HTTPDigestAuth\nfrom simple_email_confirmation.models import SimpleEmailConfirmationUserMixin\n\nfrom utils.translation import TranslatedField\n\n\nclass StudyProgram(models.Model):\n \"\"\"This class describes a university study program\"\"\"\n\n class Meta:\n verbose_name = _('study program')\n verbose_name_plural = _('study programs')\n\n DEGREE_CHOICES = (\n ('bachelor', _('Bachelor\\'s Degree')),\n ('master', _('Master\\'s Degree')),\n ('engineer', _('Engineer\\'s Degree')),\n )\n\n name_en = models.CharField(\n max_length=255,\n verbose_name=_('English program name'),\n help_text=_('Enter the name of the study program'),\n null=False,\n blank=False,\n )\n\n name_sv = models.CharField(\n max_length=255,\n verbose_name=_('Swedish program name'),\n help_text=_('Enter the name of the study program'),\n null=False,\n blank=False,\n )\n\n name = TranslatedField('name_en', 'name_sv')\n\n abbreviation_en = models.CharField(\n max_length=130,\n verbose_name=_('English program abbreviation'),\n help_text=_('Enter the abbreviation for the study program'),\n null=True,\n blank=True,\n )\n\n abbreviation_sv = models.CharField(\n max_length=130,\n verbose_name=_('Swedish program abbreviation'),\n help_text=_('Enter the abbreviation for the study program'),\n null=True,\n blank=True,\n )\n\n abbreviation = TranslatedField('name_en', 'name_sv')\n\n degree = models.CharField(\n max_length=20,\n choices=DEGREE_CHOICES,\n verbose_name=_('Degree type'),\n blank=False,\n null=False,\n )\n\n def __str__(self) -> str:\n return '%s in %s' % (self.get_degree_display(), self.name)\n\n\nclass Member(SimpleEmailConfirmationUserMixin, AbstractUser):\n \"\"\"This class describes a member\"\"\"\n\n # ---- Personal information ------\n\n birthday = models.DateField(\n verbose_name=_('Birthday'),\n null=True\n )\n\n person_number_ext = models.CharField(\n max_length=4,\n verbose_name=_('Person number extension'),\n help_text=_('Enter the last four digits of your Swedish person '\n 'number, given by the Swedish tax authority'),\n validators=[validators.RegexValidator(\n regex=r'^\\d{4}$',\n message=_('The person number extension consists of four numbers'),\n )],\n unique_for_date=\"birthday\",\n null=True,\n blank=True,\n )\n\n # ---- Membership information ------\n\n MEMBERSHIP_CHOICES = (\n ('unknown', _('Unknown')),\n ('nonmember', _('Nonmember')),\n ('member', _('Member')),\n ('alumnus', _('Alumnus')),\n )\n\n status = models.CharField(\n max_length=20,\n choices=MEMBERSHIP_CHOICES,\n verbose_name=_('Membership status'),\n blank=False,\n null=False,\n default='unknown'\n )\n status_changed = models.DateTimeField(\n default=timezone.now,\n 
null=False,\n )\n\n # ---- Contact information ------\n\n phone_number = models.CharField(\n max_length=20,\n verbose_name=_('Phone number'),\n help_text=_('Enter a phone number so UTN may reach you'),\n validators=[validators.RegexValidator(\n regex=r'^\\+?\\d+$',\n message=_('Please enter a valid phone number'),\n )],\n null=True,\n blank=True,\n )\n\n # ---- University information ------\n\n registration_year = models.CharField(\n max_length=4,\n verbose_name=_('Registration year'),\n help_text=_('Enter the year you started studying at the TakNat '\n 'faculty'),\n validators=[validators.RegexValidator(\n regex=r'^\\d{4}$',\n message=_('Please enter a valid year')\n )],\n null=True,\n blank=True,\n )\n\n study = models.ForeignKey(\n StudyProgram,\n on_delete=models.SET_NULL,\n null=True,\n blank=True,\n )\n\n def __str__(self) -> str:\n if self.first_name and self.last_name:\n return '%s %s' % (self.first_name, self.last_name)\n else:\n return self.username\n\n def person_number(self) -> str:\n if self.birthday is None or self.person_number_ext is None:\n return ''\n else:\n return '%s-%s' % (self.birthday.strftime('%Y%m%d'),\n self.person_number_ext)\n\n def update_status(self, data=None):\n if data is None:\n r = requests.get(\n 'https://register.utn.se/api.php',\n auth=HTTPDigestAuth(settings.MEMBERSHIP_API_USER,\n settings.MEMBERSHIP_API_PASSWORD),\n params={\n 'action': 'check',\n 'person_number': self.person_number().replace('-', '')\n },\n )\n try:\n data = r.json().get('status')\n except ValueError:\n return\n\n if data == 'member':\n self.status = 'member'\n elif data == 'nonmember':\n if self.status in ['unknown', 'nonmember']:\n self.status = 'nonmember'\n else:\n self.status = 'alumnus'\n\n self.status_changed = timezone.now()\n\n def remove_old_email(self):\n for email in self.get_unconfirmed_emails() or []:\n self.remove_email(email)\n for email in self.get_confirmed_emails():\n if email != self.email:\n self.remove_email(email)\n", "path": "website/members/models.py"}]} | 2,884 | 527 |
gh_patches_debug_12893 | rasdani/github-patches | git_diff | scrapy__scrapy-4420 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
follow_all fails with an empty list of URLs
<!--
Thanks for taking an interest in Scrapy!
If you have a question that starts with "How to...", please see the Scrapy Community page: https://scrapy.org/community/.
The GitHub issue tracker's purpose is to deal with bug reports and feature requests for the project itself.
Keep in mind that by filing an issue, you are expected to comply with Scrapy's Code of Conduct, including treating everyone with respect: https://github.com/scrapy/scrapy/blob/master/CODE_OF_CONDUCT.md
The following is a suggested template to structure your issue, you can find more guidelines at https://doc.scrapy.org/en/latest/contributing.html#reporting-bugs
-->
### Description
`follow_all` with an empty list of urls fails with `ValueError('Please supply exactly one of the following arguments: urls, css, xpath')`
What I would expect instead is just an empty generator of requests.
### Steps to Reproduce
```py
class Spider(scrapy.Spider):
def parse(self, response):
yield from response.follow_all([], self.parse)
```
**Expected behavior:**
No error is raised
**Actual behavior:**
`ValueError('Please supply exactly one of the following arguments: urls, css, xpath')` exception is raised.
### Versions
2.0
### Additional context
I think the solution is just a matter of changing this line: https://github.com/scrapy/scrapy/blob/master/scrapy/http/response/text.py#L191
</issue>
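For context on why an empty list trips that check: `[]` is falsy, so `filter(None, ...)` drops it and the argument count comes out as zero, exactly as if no `urls` argument had been passed at all. A minimal illustration of the distinction (a sketch, not the project's actual patch):

```python
urls, css, xpath = [], None, None

# Current check: falsy values are filtered out, so the empty list "disappears".
len(list(filter(None, (urls, css, xpath))))             # -> 0, raises ValueError

# Counting arguments that were actually supplied (not None) keeps the empty
# list, so follow_all() can simply yield no requests for it.
len([x for x in (urls, css, xpath) if x is not None])   # -> 1, passes the check
```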
<code>
[start of scrapy/http/response/text.py]
1 """
2 This module implements the TextResponse class which adds encoding handling and
3 discovering (through HTTP headers) to base Response class.
4
5 See documentation in docs/topics/request-response.rst
6 """
7
8 from contextlib import suppress
9 from typing import Generator
10 from urllib.parse import urljoin
11
12 import parsel
13 from w3lib.encoding import (html_body_declared_encoding, html_to_unicode,
14 http_content_type_encoding, resolve_encoding)
15 from w3lib.html import strip_html5_whitespace
16
17 from scrapy.http import Request
18 from scrapy.http.response import Response
19 from scrapy.utils.python import memoizemethod_noargs, to_unicode
20 from scrapy.utils.response import get_base_url
21
22
23 class TextResponse(Response):
24
25 _DEFAULT_ENCODING = 'ascii'
26
27 def __init__(self, *args, **kwargs):
28 self._encoding = kwargs.pop('encoding', None)
29 self._cached_benc = None
30 self._cached_ubody = None
31 self._cached_selector = None
32 super(TextResponse, self).__init__(*args, **kwargs)
33
34 def _set_url(self, url):
35 if isinstance(url, str):
36 self._url = to_unicode(url, self.encoding)
37 else:
38 super(TextResponse, self)._set_url(url)
39
40 def _set_body(self, body):
41 self._body = b'' # used by encoding detection
42 if isinstance(body, str):
43 if self._encoding is None:
44 raise TypeError('Cannot convert unicode body - %s has no encoding' %
45 type(self).__name__)
46 self._body = body.encode(self._encoding)
47 else:
48 super(TextResponse, self)._set_body(body)
49
50 def replace(self, *args, **kwargs):
51 kwargs.setdefault('encoding', self.encoding)
52 return Response.replace(self, *args, **kwargs)
53
54 @property
55 def encoding(self):
56 return self._declared_encoding() or self._body_inferred_encoding()
57
58 def _declared_encoding(self):
59 return self._encoding or self._headers_encoding() \
60 or self._body_declared_encoding()
61
62 def body_as_unicode(self):
63 """Return body as unicode"""
64 return self.text
65
66 @property
67 def text(self):
68 """ Body as unicode """
69 # access self.encoding before _cached_ubody to make sure
70 # _body_inferred_encoding is called
71 benc = self.encoding
72 if self._cached_ubody is None:
73 charset = 'charset=%s' % benc
74 self._cached_ubody = html_to_unicode(charset, self.body)[1]
75 return self._cached_ubody
76
77 def urljoin(self, url):
78 """Join this Response's url with a possible relative url to form an
79 absolute interpretation of the latter."""
80 return urljoin(get_base_url(self), url)
81
82 @memoizemethod_noargs
83 def _headers_encoding(self):
84 content_type = self.headers.get(b'Content-Type', b'')
85 return http_content_type_encoding(to_unicode(content_type))
86
87 def _body_inferred_encoding(self):
88 if self._cached_benc is None:
89 content_type = to_unicode(self.headers.get(b'Content-Type', b''))
90 benc, ubody = html_to_unicode(content_type, self.body,
91 auto_detect_fun=self._auto_detect_fun,
92 default_encoding=self._DEFAULT_ENCODING)
93 self._cached_benc = benc
94 self._cached_ubody = ubody
95 return self._cached_benc
96
97 def _auto_detect_fun(self, text):
98 for enc in (self._DEFAULT_ENCODING, 'utf-8', 'cp1252'):
99 try:
100 text.decode(enc)
101 except UnicodeError:
102 continue
103 return resolve_encoding(enc)
104
105 @memoizemethod_noargs
106 def _body_declared_encoding(self):
107 return html_body_declared_encoding(self.body)
108
109 @property
110 def selector(self):
111 from scrapy.selector import Selector
112 if self._cached_selector is None:
113 self._cached_selector = Selector(self)
114 return self._cached_selector
115
116 def xpath(self, query, **kwargs):
117 return self.selector.xpath(query, **kwargs)
118
119 def css(self, query):
120 return self.selector.css(query)
121
122 def follow(self, url, callback=None, method='GET', headers=None, body=None,
123 cookies=None, meta=None, encoding=None, priority=0,
124 dont_filter=False, errback=None, cb_kwargs=None, flags=None):
125 # type: (...) -> Request
126 """
127 Return a :class:`~.Request` instance to follow a link ``url``.
128 It accepts the same arguments as ``Request.__init__`` method,
129 but ``url`` can be not only an absolute URL, but also
130
131 * a relative URL
132 * a :class:`~scrapy.link.Link` object, e.g. the result of
133 :ref:`topics-link-extractors`
134 * a :class:`~scrapy.selector.Selector` object for a ``<link>`` or ``<a>`` element, e.g.
135 ``response.css('a.my_link')[0]``
136 * an attribute :class:`~scrapy.selector.Selector` (not SelectorList), e.g.
137 ``response.css('a::attr(href)')[0]`` or
138 ``response.xpath('//img/@src')[0]``
139
140 See :ref:`response-follow-example` for usage examples.
141 """
142 if isinstance(url, parsel.Selector):
143 url = _url_from_selector(url)
144 elif isinstance(url, parsel.SelectorList):
145 raise ValueError("SelectorList is not supported")
146 encoding = self.encoding if encoding is None else encoding
147 return super(TextResponse, self).follow(
148 url=url,
149 callback=callback,
150 method=method,
151 headers=headers,
152 body=body,
153 cookies=cookies,
154 meta=meta,
155 encoding=encoding,
156 priority=priority,
157 dont_filter=dont_filter,
158 errback=errback,
159 cb_kwargs=cb_kwargs,
160 flags=flags,
161 )
162
163 def follow_all(self, urls=None, callback=None, method='GET', headers=None, body=None,
164 cookies=None, meta=None, encoding=None, priority=0,
165 dont_filter=False, errback=None, cb_kwargs=None, flags=None,
166 css=None, xpath=None):
167 # type: (...) -> Generator[Request, None, None]
168 """
169 A generator that produces :class:`~.Request` instances to follow all
170 links in ``urls``. It accepts the same arguments as the :class:`~.Request`'s
171 ``__init__`` method, except that each ``urls`` element does not need to be
172 an absolute URL, it can be any of the following:
173
174 * a relative URL
175 * a :class:`~scrapy.link.Link` object, e.g. the result of
176 :ref:`topics-link-extractors`
177 * a :class:`~scrapy.selector.Selector` object for a ``<link>`` or ``<a>`` element, e.g.
178 ``response.css('a.my_link')[0]``
179 * an attribute :class:`~scrapy.selector.Selector` (not SelectorList), e.g.
180 ``response.css('a::attr(href)')[0]`` or
181 ``response.xpath('//img/@src')[0]``
182
183 In addition, ``css`` and ``xpath`` arguments are accepted to perform the link extraction
184 within the ``follow_all`` method (only one of ``urls``, ``css`` and ``xpath`` is accepted).
185
186 Note that when passing a ``SelectorList`` as argument for the ``urls`` parameter or
187 using the ``css`` or ``xpath`` parameters, this method will not produce requests for
188 selectors from which links cannot be obtained (for instance, anchor tags without an
189 ``href`` attribute)
190 """
191 arg_count = len(list(filter(None, (urls, css, xpath))))
192 if arg_count != 1:
193 raise ValueError('Please supply exactly one of the following arguments: urls, css, xpath')
194 if not urls:
195 if css:
196 urls = self.css(css)
197 if xpath:
198 urls = self.xpath(xpath)
199 if isinstance(urls, parsel.SelectorList):
200 selectors = urls
201 urls = []
202 for sel in selectors:
203 with suppress(_InvalidSelector):
204 urls.append(_url_from_selector(sel))
205 return super(TextResponse, self).follow_all(
206 urls=urls,
207 callback=callback,
208 method=method,
209 headers=headers,
210 body=body,
211 cookies=cookies,
212 meta=meta,
213 encoding=encoding,
214 priority=priority,
215 dont_filter=dont_filter,
216 errback=errback,
217 cb_kwargs=cb_kwargs,
218 flags=flags,
219 )
220
221
222 class _InvalidSelector(ValueError):
223 """
224 Raised when a URL cannot be obtained from a Selector
225 """
226
227
228 def _url_from_selector(sel):
229 # type: (parsel.Selector) -> str
230 if isinstance(sel.root, str):
231 # e.g. ::attr(href) result
232 return strip_html5_whitespace(sel.root)
233 if not hasattr(sel.root, 'tag'):
234 raise _InvalidSelector("Unsupported selector: %s" % sel)
235 if sel.root.tag not in ('a', 'link'):
236 raise _InvalidSelector("Only <a> and <link> elements are supported; got <%s>" %
237 sel.root.tag)
238 href = sel.root.get('href')
239 if href is None:
240 raise _InvalidSelector("<%s> element has no href attribute: %s" %
241 (sel.root.tag, sel))
242 return strip_html5_whitespace(href)
243
[end of scrapy/http/response/text.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scrapy/http/response/text.py b/scrapy/http/response/text.py
--- a/scrapy/http/response/text.py
+++ b/scrapy/http/response/text.py
@@ -188,9 +188,11 @@
selectors from which links cannot be obtained (for instance, anchor tags without an
``href`` attribute)
"""
- arg_count = len(list(filter(None, (urls, css, xpath))))
- if arg_count != 1:
- raise ValueError('Please supply exactly one of the following arguments: urls, css, xpath')
+ arguments = [x for x in (urls, css, xpath) if x is not None]
+ if len(arguments) != 1:
+ raise ValueError(
+ "Please supply exactly one of the following arguments: urls, css, xpath"
+ )
if not urls:
if css:
urls = self.css(css)
| {"golden_diff": "diff --git a/scrapy/http/response/text.py b/scrapy/http/response/text.py\n--- a/scrapy/http/response/text.py\n+++ b/scrapy/http/response/text.py\n@@ -188,9 +188,11 @@\n selectors from which links cannot be obtained (for instance, anchor tags without an\n ``href`` attribute)\n \"\"\"\n- arg_count = len(list(filter(None, (urls, css, xpath))))\n- if arg_count != 1:\n- raise ValueError('Please supply exactly one of the following arguments: urls, css, xpath')\n+ arguments = [x for x in (urls, css, xpath) if x is not None]\n+ if len(arguments) != 1:\n+ raise ValueError(\n+ \"Please supply exactly one of the following arguments: urls, css, xpath\"\n+ )\n if not urls:\n if css:\n urls = self.css(css)\n", "issue": "follow_all fails with an empty list of URLs\n<!--\r\n\r\nThanks for taking an interest in Scrapy!\r\n\r\nIf you have a question that starts with \"How to...\", please see the Scrapy Community page: https://scrapy.org/community/.\r\nThe GitHub issue tracker's purpose is to deal with bug reports and feature requests for the project itself.\r\n\r\nKeep in mind that by filing an issue, you are expected to comply with Scrapy's Code of Conduct, including treating everyone with respect: https://github.com/scrapy/scrapy/blob/master/CODE_OF_CONDUCT.md\r\n\r\nThe following is a suggested template to structure your issue, you can find more guidelines at https://doc.scrapy.org/en/latest/contributing.html#reporting-bugs\r\n\r\n-->\r\n\r\n### Description\r\n\r\n`follow_all` with an empty list of urls fails with `ValueError('Please supply exactly one of the following arguments: urls, css, xpath')`\r\n\r\nWhat I would expect instead is just an empty generator of requests. \r\n\r\n### Steps to Reproduce\r\n\r\n```py\r\nclass Spider(scrapy.Spider):\r\n\r\n def parse(self, response):\r\n yield from response.follow_all([], self.parse)\r\n```\r\n\r\n**Expected behavior:** \r\n\r\nNo error is raised\r\n\r\n**Actual behavior:**\r\n\r\n`ValueError('Please supply exactly one of the following arguments: urls, css, xpath')` exception is raised. 
\r\n\r\n\r\n### Versions\r\n\r\n2.0\r\n\r\n### Additional context\r\n\r\nI think the solution is just a matter of changing this line: https://github.com/scrapy/scrapy/blob/master/scrapy/http/response/text.py#L191\r\n\n", "before_files": [{"content": "\"\"\"\nThis module implements the TextResponse class which adds encoding handling and\ndiscovering (through HTTP headers) to base Response class.\n\nSee documentation in docs/topics/request-response.rst\n\"\"\"\n\nfrom contextlib import suppress\nfrom typing import Generator\nfrom urllib.parse import urljoin\n\nimport parsel\nfrom w3lib.encoding import (html_body_declared_encoding, html_to_unicode,\n http_content_type_encoding, resolve_encoding)\nfrom w3lib.html import strip_html5_whitespace\n\nfrom scrapy.http import Request\nfrom scrapy.http.response import Response\nfrom scrapy.utils.python import memoizemethod_noargs, to_unicode\nfrom scrapy.utils.response import get_base_url\n\n\nclass TextResponse(Response):\n\n _DEFAULT_ENCODING = 'ascii'\n\n def __init__(self, *args, **kwargs):\n self._encoding = kwargs.pop('encoding', None)\n self._cached_benc = None\n self._cached_ubody = None\n self._cached_selector = None\n super(TextResponse, self).__init__(*args, **kwargs)\n\n def _set_url(self, url):\n if isinstance(url, str):\n self._url = to_unicode(url, self.encoding)\n else:\n super(TextResponse, self)._set_url(url)\n\n def _set_body(self, body):\n self._body = b'' # used by encoding detection\n if isinstance(body, str):\n if self._encoding is None:\n raise TypeError('Cannot convert unicode body - %s has no encoding' %\n type(self).__name__)\n self._body = body.encode(self._encoding)\n else:\n super(TextResponse, self)._set_body(body)\n\n def replace(self, *args, **kwargs):\n kwargs.setdefault('encoding', self.encoding)\n return Response.replace(self, *args, **kwargs)\n\n @property\n def encoding(self):\n return self._declared_encoding() or self._body_inferred_encoding()\n\n def _declared_encoding(self):\n return self._encoding or self._headers_encoding() \\\n or self._body_declared_encoding()\n\n def body_as_unicode(self):\n \"\"\"Return body as unicode\"\"\"\n return self.text\n\n @property\n def text(self):\n \"\"\" Body as unicode \"\"\"\n # access self.encoding before _cached_ubody to make sure\n # _body_inferred_encoding is called\n benc = self.encoding\n if self._cached_ubody is None:\n charset = 'charset=%s' % benc\n self._cached_ubody = html_to_unicode(charset, self.body)[1]\n return self._cached_ubody\n\n def urljoin(self, url):\n \"\"\"Join this Response's url with a possible relative url to form an\n absolute interpretation of the latter.\"\"\"\n return urljoin(get_base_url(self), url)\n\n @memoizemethod_noargs\n def _headers_encoding(self):\n content_type = self.headers.get(b'Content-Type', b'')\n return http_content_type_encoding(to_unicode(content_type))\n\n def _body_inferred_encoding(self):\n if self._cached_benc is None:\n content_type = to_unicode(self.headers.get(b'Content-Type', b''))\n benc, ubody = html_to_unicode(content_type, self.body,\n auto_detect_fun=self._auto_detect_fun,\n default_encoding=self._DEFAULT_ENCODING)\n self._cached_benc = benc\n self._cached_ubody = ubody\n return self._cached_benc\n\n def _auto_detect_fun(self, text):\n for enc in (self._DEFAULT_ENCODING, 'utf-8', 'cp1252'):\n try:\n text.decode(enc)\n except UnicodeError:\n continue\n return resolve_encoding(enc)\n\n @memoizemethod_noargs\n def _body_declared_encoding(self):\n return html_body_declared_encoding(self.body)\n\n @property\n def 
selector(self):\n from scrapy.selector import Selector\n if self._cached_selector is None:\n self._cached_selector = Selector(self)\n return self._cached_selector\n\n def xpath(self, query, **kwargs):\n return self.selector.xpath(query, **kwargs)\n\n def css(self, query):\n return self.selector.css(query)\n\n def follow(self, url, callback=None, method='GET', headers=None, body=None,\n cookies=None, meta=None, encoding=None, priority=0,\n dont_filter=False, errback=None, cb_kwargs=None, flags=None):\n # type: (...) -> Request\n \"\"\"\n Return a :class:`~.Request` instance to follow a link ``url``.\n It accepts the same arguments as ``Request.__init__`` method,\n but ``url`` can be not only an absolute URL, but also\n\n * a relative URL\n * a :class:`~scrapy.link.Link` object, e.g. the result of\n :ref:`topics-link-extractors`\n * a :class:`~scrapy.selector.Selector` object for a ``<link>`` or ``<a>`` element, e.g.\n ``response.css('a.my_link')[0]``\n * an attribute :class:`~scrapy.selector.Selector` (not SelectorList), e.g.\n ``response.css('a::attr(href)')[0]`` or\n ``response.xpath('//img/@src')[0]``\n\n See :ref:`response-follow-example` for usage examples.\n \"\"\"\n if isinstance(url, parsel.Selector):\n url = _url_from_selector(url)\n elif isinstance(url, parsel.SelectorList):\n raise ValueError(\"SelectorList is not supported\")\n encoding = self.encoding if encoding is None else encoding\n return super(TextResponse, self).follow(\n url=url,\n callback=callback,\n method=method,\n headers=headers,\n body=body,\n cookies=cookies,\n meta=meta,\n encoding=encoding,\n priority=priority,\n dont_filter=dont_filter,\n errback=errback,\n cb_kwargs=cb_kwargs,\n flags=flags,\n )\n\n def follow_all(self, urls=None, callback=None, method='GET', headers=None, body=None,\n cookies=None, meta=None, encoding=None, priority=0,\n dont_filter=False, errback=None, cb_kwargs=None, flags=None,\n css=None, xpath=None):\n # type: (...) -> Generator[Request, None, None]\n \"\"\"\n A generator that produces :class:`~.Request` instances to follow all\n links in ``urls``. It accepts the same arguments as the :class:`~.Request`'s\n ``__init__`` method, except that each ``urls`` element does not need to be\n an absolute URL, it can be any of the following:\n\n * a relative URL\n * a :class:`~scrapy.link.Link` object, e.g. 
the result of\n :ref:`topics-link-extractors`\n * a :class:`~scrapy.selector.Selector` object for a ``<link>`` or ``<a>`` element, e.g.\n ``response.css('a.my_link')[0]``\n * an attribute :class:`~scrapy.selector.Selector` (not SelectorList), e.g.\n ``response.css('a::attr(href)')[0]`` or\n ``response.xpath('//img/@src')[0]``\n\n In addition, ``css`` and ``xpath`` arguments are accepted to perform the link extraction\n within the ``follow_all`` method (only one of ``urls``, ``css`` and ``xpath`` is accepted).\n\n Note that when passing a ``SelectorList`` as argument for the ``urls`` parameter or\n using the ``css`` or ``xpath`` parameters, this method will not produce requests for\n selectors from which links cannot be obtained (for instance, anchor tags without an\n ``href`` attribute)\n \"\"\"\n arg_count = len(list(filter(None, (urls, css, xpath))))\n if arg_count != 1:\n raise ValueError('Please supply exactly one of the following arguments: urls, css, xpath')\n if not urls:\n if css:\n urls = self.css(css)\n if xpath:\n urls = self.xpath(xpath)\n if isinstance(urls, parsel.SelectorList):\n selectors = urls\n urls = []\n for sel in selectors:\n with suppress(_InvalidSelector):\n urls.append(_url_from_selector(sel))\n return super(TextResponse, self).follow_all(\n urls=urls,\n callback=callback,\n method=method,\n headers=headers,\n body=body,\n cookies=cookies,\n meta=meta,\n encoding=encoding,\n priority=priority,\n dont_filter=dont_filter,\n errback=errback,\n cb_kwargs=cb_kwargs,\n flags=flags,\n )\n\n\nclass _InvalidSelector(ValueError):\n \"\"\"\n Raised when a URL cannot be obtained from a Selector\n \"\"\"\n\n\ndef _url_from_selector(sel):\n # type: (parsel.Selector) -> str\n if isinstance(sel.root, str):\n # e.g. ::attr(href) result\n return strip_html5_whitespace(sel.root)\n if not hasattr(sel.root, 'tag'):\n raise _InvalidSelector(\"Unsupported selector: %s\" % sel)\n if sel.root.tag not in ('a', 'link'):\n raise _InvalidSelector(\"Only <a> and <link> elements are supported; got <%s>\" %\n sel.root.tag)\n href = sel.root.get('href')\n if href is None:\n raise _InvalidSelector(\"<%s> element has no href attribute: %s\" %\n (sel.root.tag, sel))\n return strip_html5_whitespace(href)\n", "path": "scrapy/http/response/text.py"}]} | 3,542 | 196 |
gh_patches_debug_37135 | rasdani/github-patches | git_diff | sopel-irc__sopel-843 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Strip leading . on .tld queries
`.tld .py` should be the same as `.tld py`
</issue>
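A minimal sketch of the normalization being asked for, stripping a single leading dot so both spellings resolve to the same TLD; the helper name is illustrative only and is not part of the module below:

```python
def normalize_tld(query):
    """Treat '.py' and 'py' identically by dropping one leading dot."""
    return query[1:] if query.startswith('.') else query


assert normalize_tld('.py') == normalize_tld('py') == 'py'
```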
<code>
[start of willie/modules/tld.py]
1 # coding=utf8
2 """
3 tld.py - Willie TLD Module
4 Copyright 2009-10, Michael Yanovich, yanovich.net
5 Licensed under the Eiffel Forum License 2.
6
7 http://willie.dftba.net
8 """
9 from __future__ import unicode_literals
10
11 from willie import web
12 from willie.module import commands, example
13 import re
14 import sys
15 if sys.version_info.major >= 3:
16 unicode = str
17
18 uri = 'https://en.wikipedia.org/wiki/List_of_Internet_top-level_domains'
19 r_tag = re.compile(r'<(?!!)[^>]+>')
20
21
22 @commands('tld')
23 @example('.tld ru')
24 def gettld(bot, trigger):
25 """Show information about the given Top Level Domain."""
26 page = web.get(uri)
27 search = r'(?i)<td><a href="\S+" title="\S+">\.{0}</a></td>\n(<td><a href=".*</a></td>\n)?<td>([A-Za-z0-9].*?)</td>\n<td>(.*)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n'
28 search = search.format(trigger.group(2))
29 re_country = re.compile(search)
30 matches = re_country.findall(page)
31 if not matches:
32 search = r'(?i)<td><a href="\S+" title="(\S+)">\.{0}</a></td>\n<td><a href=".*">(.*)</a></td>\n<td>([A-Za-z0-9].*?)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n'
33 search = search.format(trigger.group(2))
34 re_country = re.compile(search)
35 matches = re_country.findall(page)
36 if matches:
37 matches = list(matches[0])
38 i = 0
39 while i < len(matches):
40 matches[i] = r_tag.sub("", matches[i])
41 i += 1
42 desc = matches[2]
43 if len(desc) > 400:
44 desc = desc[:400] + "..."
45 reply = "%s -- %s. IDN: %s, DNSSEC: %s" % (matches[1], desc,
46 matches[3], matches[4])
47 bot.reply(reply)
48 else:
49 search = r'<td><a href="\S+" title="\S+">.{0}</a></td>\n<td><span class="flagicon"><img.*?\">(.*?)</a></td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n'
50 search = search.format(unicode(trigger.group(2)))
51 re_country = re.compile(search)
52 matches = re_country.findall(page)
53 if matches:
54 matches = matches[0]
55 dict_val = dict()
56 dict_val["country"], dict_val["expl"], dict_val["notes"], dict_val["idn"], dict_val["dnssec"], dict_val["sld"] = matches
57 for key in dict_val:
58 if dict_val[key] == " ":
59 dict_val[key] = "N/A"
60 dict_val[key] = r_tag.sub('', dict_val[key])
61 if len(dict_val["notes"]) > 400:
62 dict_val["notes"] = dict_val["notes"][:400] + "..."
63 reply = "%s (%s, %s). IDN: %s, DNSSEC: %s, SLD: %s" % (dict_val["country"], dict_val["expl"], dict_val["notes"], dict_val["idn"], dict_val["dnssec"], dict_val["sld"])
64 else:
65 reply = "No matches found for TLD: {0}".format(unicode(trigger.group(2)))
66 bot.reply(reply)
67
[end of willie/modules/tld.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/willie/modules/tld.py b/willie/modules/tld.py
--- a/willie/modules/tld.py
+++ b/willie/modules/tld.py
@@ -24,13 +24,16 @@
def gettld(bot, trigger):
"""Show information about the given Top Level Domain."""
page = web.get(uri)
+ tld = trigger.group(2)
+ if tld[0] == '.':
+ tld = tld[1:]
search = r'(?i)<td><a href="\S+" title="\S+">\.{0}</a></td>\n(<td><a href=".*</a></td>\n)?<td>([A-Za-z0-9].*?)</td>\n<td>(.*)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n'
- search = search.format(trigger.group(2))
+ search = search.format(tld)
re_country = re.compile(search)
matches = re_country.findall(page)
if not matches:
search = r'(?i)<td><a href="\S+" title="(\S+)">\.{0}</a></td>\n<td><a href=".*">(.*)</a></td>\n<td>([A-Za-z0-9].*?)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n'
- search = search.format(trigger.group(2))
+ search = search.format(tld)
re_country = re.compile(search)
matches = re_country.findall(page)
if matches:
@@ -47,7 +50,7 @@
bot.reply(reply)
else:
search = r'<td><a href="\S+" title="\S+">.{0}</a></td>\n<td><span class="flagicon"><img.*?\">(.*?)</a></td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n'
- search = search.format(unicode(trigger.group(2)))
+ search = search.format(unicode(tld))
re_country = re.compile(search)
matches = re_country.findall(page)
if matches:
@@ -62,5 +65,5 @@
dict_val["notes"] = dict_val["notes"][:400] + "..."
reply = "%s (%s, %s). IDN: %s, DNSSEC: %s, SLD: %s" % (dict_val["country"], dict_val["expl"], dict_val["notes"], dict_val["idn"], dict_val["dnssec"], dict_val["sld"])
else:
- reply = "No matches found for TLD: {0}".format(unicode(trigger.group(2)))
+ reply = "No matches found for TLD: {0}".format(unicode(tld))
bot.reply(reply)
| {"golden_diff": "diff --git a/willie/modules/tld.py b/willie/modules/tld.py\n--- a/willie/modules/tld.py\n+++ b/willie/modules/tld.py\n@@ -24,13 +24,16 @@\n def gettld(bot, trigger):\n \"\"\"Show information about the given Top Level Domain.\"\"\"\n page = web.get(uri)\n+ tld = trigger.group(2)\n+ if tld[0] == '.':\n+ tld = tld[1:]\n search = r'(?i)<td><a href=\"\\S+\" title=\"\\S+\">\\.{0}</a></td>\\n(<td><a href=\".*</a></td>\\n)?<td>([A-Za-z0-9].*?)</td>\\n<td>(.*)</td>\\n<td[^>]*>(.*?)</td>\\n<td[^>]*>(.*?)</td>\\n'\n- search = search.format(trigger.group(2))\n+ search = search.format(tld)\n re_country = re.compile(search)\n matches = re_country.findall(page)\n if not matches:\n search = r'(?i)<td><a href=\"\\S+\" title=\"(\\S+)\">\\.{0}</a></td>\\n<td><a href=\".*\">(.*)</a></td>\\n<td>([A-Za-z0-9].*?)</td>\\n<td[^>]*>(.*?)</td>\\n<td[^>]*>(.*?)</td>\\n'\n- search = search.format(trigger.group(2))\n+ search = search.format(tld)\n re_country = re.compile(search)\n matches = re_country.findall(page)\n if matches:\n@@ -47,7 +50,7 @@\n bot.reply(reply)\n else:\n search = r'<td><a href=\"\\S+\" title=\"\\S+\">.{0}</a></td>\\n<td><span class=\"flagicon\"><img.*?\\\">(.*?)</a></td>\\n<td[^>]*>(.*?)</td>\\n<td[^>]*>(.*?)</td>\\n<td[^>]*>(.*?)</td>\\n<td[^>]*>(.*?)</td>\\n<td[^>]*>(.*?)</td>\\n'\n- search = search.format(unicode(trigger.group(2)))\n+ search = search.format(unicode(tld))\n re_country = re.compile(search)\n matches = re_country.findall(page)\n if matches:\n@@ -62,5 +65,5 @@\n dict_val[\"notes\"] = dict_val[\"notes\"][:400] + \"...\"\n reply = \"%s (%s, %s). IDN: %s, DNSSEC: %s, SLD: %s\" % (dict_val[\"country\"], dict_val[\"expl\"], dict_val[\"notes\"], dict_val[\"idn\"], dict_val[\"dnssec\"], dict_val[\"sld\"])\n else:\n- reply = \"No matches found for TLD: {0}\".format(unicode(trigger.group(2)))\n+ reply = \"No matches found for TLD: {0}\".format(unicode(tld))\n bot.reply(reply)\n", "issue": "Strip leading . on .tld queries\n`.tld .py` should be the same as `.tld py`\n\n", "before_files": [{"content": "# coding=utf8\n\"\"\"\ntld.py - Willie TLD Module\nCopyright 2009-10, Michael Yanovich, yanovich.net\nLicensed under the Eiffel Forum License 2.\n\nhttp://willie.dftba.net\n\"\"\"\nfrom __future__ import unicode_literals\n\nfrom willie import web\nfrom willie.module import commands, example\nimport re\nimport sys\nif sys.version_info.major >= 3:\n unicode = str\n\nuri = 'https://en.wikipedia.org/wiki/List_of_Internet_top-level_domains'\nr_tag = re.compile(r'<(?!!)[^>]+>')\n\n\n@commands('tld')\n@example('.tld ru')\ndef gettld(bot, trigger):\n \"\"\"Show information about the given Top Level Domain.\"\"\"\n page = web.get(uri)\n search = r'(?i)<td><a href=\"\\S+\" title=\"\\S+\">\\.{0}</a></td>\\n(<td><a href=\".*</a></td>\\n)?<td>([A-Za-z0-9].*?)</td>\\n<td>(.*)</td>\\n<td[^>]*>(.*?)</td>\\n<td[^>]*>(.*?)</td>\\n'\n search = search.format(trigger.group(2))\n re_country = re.compile(search)\n matches = re_country.findall(page)\n if not matches:\n search = r'(?i)<td><a href=\"\\S+\" title=\"(\\S+)\">\\.{0}</a></td>\\n<td><a href=\".*\">(.*)</a></td>\\n<td>([A-Za-z0-9].*?)</td>\\n<td[^>]*>(.*?)</td>\\n<td[^>]*>(.*?)</td>\\n'\n search = search.format(trigger.group(2))\n re_country = re.compile(search)\n matches = re_country.findall(page)\n if matches:\n matches = list(matches[0])\n i = 0\n while i < len(matches):\n matches[i] = r_tag.sub(\"\", matches[i])\n i += 1\n desc = matches[2]\n if len(desc) > 400:\n desc = desc[:400] + \"...\"\n reply = \"%s -- %s. 
IDN: %s, DNSSEC: %s\" % (matches[1], desc,\n matches[3], matches[4])\n bot.reply(reply)\n else:\n search = r'<td><a href=\"\\S+\" title=\"\\S+\">.{0}</a></td>\\n<td><span class=\"flagicon\"><img.*?\\\">(.*?)</a></td>\\n<td[^>]*>(.*?)</td>\\n<td[^>]*>(.*?)</td>\\n<td[^>]*>(.*?)</td>\\n<td[^>]*>(.*?)</td>\\n<td[^>]*>(.*?)</td>\\n'\n search = search.format(unicode(trigger.group(2)))\n re_country = re.compile(search)\n matches = re_country.findall(page)\n if matches:\n matches = matches[0]\n dict_val = dict()\n dict_val[\"country\"], dict_val[\"expl\"], dict_val[\"notes\"], dict_val[\"idn\"], dict_val[\"dnssec\"], dict_val[\"sld\"] = matches\n for key in dict_val:\n if dict_val[key] == \" \":\n dict_val[key] = \"N/A\"\n dict_val[key] = r_tag.sub('', dict_val[key])\n if len(dict_val[\"notes\"]) > 400:\n dict_val[\"notes\"] = dict_val[\"notes\"][:400] + \"...\"\n reply = \"%s (%s, %s). IDN: %s, DNSSEC: %s, SLD: %s\" % (dict_val[\"country\"], dict_val[\"expl\"], dict_val[\"notes\"], dict_val[\"idn\"], dict_val[\"dnssec\"], dict_val[\"sld\"])\n else:\n reply = \"No matches found for TLD: {0}\".format(unicode(trigger.group(2)))\n bot.reply(reply)\n", "path": "willie/modules/tld.py"}]} | 1,554 | 668 |
gh_patches_debug_25153 | rasdani/github-patches | git_diff | pytorch__pytorch-1404 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Dropout documentation is incorrect
"Randomly zeroes some of the elements of the input tensor. The elements to zero are randomized on every forward call."
This is incorrect; the function also scales up by 1/(1-p), which the implementation correctly does.
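For instance, a minimal sketch (assuming a recent PyTorch build) that makes the scaling visible:

```python
import torch
import torch.nn as nn

torch.manual_seed(0)
m = nn.Dropout(p=0.5)          # modules start in training mode
x = torch.ones(1000)
y = m(x)
print(y[y != 0].unique())      # tensor([2.]) -- surviving elements scaled by 1/(1-p)

m.eval()                       # dropout acts as the identity at evaluation time
print(m(x).unique())           # tensor([1.])
```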
</issue>
<code>
[start of torch/nn/modules/dropout.py]
1 from .module import Module
2 from .. import functional as F
3
4
5 class Dropout(Module):
6 r"""Randomly zeroes some of the elements of the input tensor.
7 The elements to zero are randomized on every forward call.
8
9 Args:
10 p: probability of an element to be zeroed. Default: 0.5
11 inplace: If set to True, will do this operation in-place. Default: false
12
13 Shape:
14 - Input: `Any`. Input can be of any shape
15 - Output: `Same`. Output is of the same shape as input
16
17 Examples::
18
19 >>> m = nn.Dropout(p=0.2)
20 >>> input = autograd.Variable(torch.randn(20, 16))
21 >>> output = m(input)
22 """
23
24 def __init__(self, p=0.5, inplace=False):
25 super(Dropout, self).__init__()
26 if p < 0 or p > 1:
27 raise ValueError("dropout probability has to be between 0 and 1, "
28 "but got {}".format(p))
29 self.p = p
30 self.inplace = inplace
31
32 def forward(self, input):
33 return F.dropout(input, self.p, self.training, self.inplace)
34
35 def __repr__(self):
36 inplace_str = ', inplace' if self.inplace else ''
37 return self.__class__.__name__ + ' (' \
38 + 'p = ' + str(self.p) \
39 + inplace_str + ')'
40
41
42 class Dropout2d(Module):
43 r"""Randomly zeroes whole channels of the input tensor.
44 The channels to zero-out are randomized on every forward call.
45
46 *Usually the input comes from Conv2d modules.*
47
48 As described in the paper
49 `Efficient Object Localization Using Convolutional Networks`_ ,
50 if adjacent pixels within feature maps are strongly correlated
51 (as is normally the case in early convolution layers) then iid dropout
52 will not regularize the activations and will otherwise just result
53 in an effective learning rate decrease.
54
55 In this case, :func:`nn.Dropout2d` will help promote independence between
56 feature maps and should be used instead.
57
58 Args:
59 p (float, optional): probability of an element to be zeroed.
60 inplace (bool, optional): If set to True, will do this operation in-place
61
62 Shape:
63 - Input: :math:`(N, C, H, W)`
64 - Output: :math:`(N, C, H, W)` (same shape as input)
65
66 Examples::
67
68 >>> m = nn.Dropout2d(p=0.2)
69 >>> input = autograd.Variable(torch.randn(20, 16, 32, 32))
70 >>> output = m(input)
71
72 .. _Efficient Object Localization Using Convolutional Networks:
73 http://arxiv.org/abs/1411.4280
74 """
75
76 def __init__(self, p=0.5, inplace=False):
77 super(Dropout2d, self).__init__()
78 if p < 0 or p > 1:
79 raise ValueError("dropout probability has to be between 0 and 1, "
80 "but got {}".format(p))
81 self.p = p
82 self.inplace = inplace
83
84 def forward(self, input):
85 return self._backend.Dropout2d(self.p, self.training, self.inplace)(input)
86
87 def __repr__(self):
88 inplace_str = ', inplace' if self.inplace else ''
89 return self.__class__.__name__ + ' (' \
90 + 'p=' + str(self.p) \
91 + inplace_str + ')'
92
93
94 class Dropout3d(Module):
95 r"""Randomly zeroes whole channels of the input tensor.
96 The channels to zero are randomized on every forward call.
97
98 *Usually the input comes from Conv3d modules.*
99
100 As described in the paper
101 `Efficient Object Localization Using Convolutional Networks`_ ,
102 if adjacent pixels within feature maps are strongly correlated
103 (as is normally the case in early convolution layers) then iid dropout
104 will not regularize the activations and will otherwise just result
105 in an effective learning rate decrease.
106
107 In this case, :func:`nn.Dropout3d` will help promote independence between
108 feature maps and should be used instead.
109
110 Args:
111 p (float, optional): probability of an element to be zeroed.
112 inplace (bool, optional): If set to True, will do this operation in-place
113
114 Shape:
115 - Input: :math:`(N, C, D, H, W)`
116 - Output: :math:`(N, C, D, H, W)` (same shape as input)
117
118 Examples::
119
120 >>> m = nn.Dropout3d(p=0.2)
121 >>> input = autograd.Variable(torch.randn(20, 16, 4, 32, 32))
122 >>> output = m(input)
123
124 .. _Efficient Object Localization Using Convolutional Networks:
125 http://arxiv.org/abs/1411.4280
126 """
127
128 def __init__(self, p=0.5, inplace=False):
129 super(Dropout3d, self).__init__()
130 if p < 0 or p > 1:
131 raise ValueError("dropout probability has to be between 0 and 1, "
132 "but got {}".format(p))
133 self.p = p
134 self.inplace = inplace
135
136 def forward(self, input):
137 return self._backend.Dropout3d(self.p, self.training, self.inplace)(input)
138
139 def __repr__(self):
140 inplace_str = ', inplace' if self.inplace else ''
141 return self.__class__.__name__ + ' (' \
142 + 'p=' + str(self.p) \
143 + inplace_str + ')'
144
[end of torch/nn/modules/dropout.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/torch/nn/modules/dropout.py b/torch/nn/modules/dropout.py
--- a/torch/nn/modules/dropout.py
+++ b/torch/nn/modules/dropout.py
@@ -3,9 +3,19 @@
class Dropout(Module):
- r"""Randomly zeroes some of the elements of the input tensor.
+ r"""During training, randomly zeroes some of the elements of the input
+ tensor with probability *p* using samples from a bernoulli distribution.
The elements to zero are randomized on every forward call.
+ This has proven to be an effective technique for regularization and
+ preventing the co-adaptation of neurons as described in the paper
+ `Improving neural networks by preventing co-adaptation of feature
+ detectors`_ .
+
+ Furthermore, the outputs are scaled by a factor of *1/(1-p)* during
+ training. This means that during evaluation the module simply computes an
+ identity function.
+
Args:
p: probability of an element to be zeroed. Default: 0.5
inplace: If set to True, will do this operation in-place. Default: false
@@ -19,6 +29,8 @@
>>> m = nn.Dropout(p=0.2)
>>> input = autograd.Variable(torch.randn(20, 16))
>>> output = m(input)
+
+ .. _Improving neural networks by preventing co-adaptation of feature detectors: https://arxiv.org/abs/1207.0580
"""
def __init__(self, p=0.5, inplace=False):
| {"golden_diff": "diff --git a/torch/nn/modules/dropout.py b/torch/nn/modules/dropout.py\n--- a/torch/nn/modules/dropout.py\n+++ b/torch/nn/modules/dropout.py\n@@ -3,9 +3,19 @@\n \n \n class Dropout(Module):\n- r\"\"\"Randomly zeroes some of the elements of the input tensor.\n+ r\"\"\"During training, randomly zeroes some of the elements of the input\n+ tensor with probability *p* using samples from a bernoulli distribution.\n The elements to zero are randomized on every forward call.\n \n+ This has proven to be an effective technique for regularization and\n+ preventing the co-adaptation of neurons as described in the paper\n+ `Improving neural networks by preventing co-adaptation of feature\n+ detectors`_ .\n+\n+ Furthermore, the outputs are scaled by a factor of *1/(1-p)* during\n+ training. This means that during evaluation the module simply computes an\n+ identity function.\n+\n Args:\n p: probability of an element to be zeroed. Default: 0.5\n inplace: If set to True, will do this operation in-place. Default: false\n@@ -19,6 +29,8 @@\n >>> m = nn.Dropout(p=0.2)\n >>> input = autograd.Variable(torch.randn(20, 16))\n >>> output = m(input)\n+\n+ .. _Improving neural networks by preventing co-adaptation of feature detectors: https://arxiv.org/abs/1207.0580\n \"\"\"\n \n def __init__(self, p=0.5, inplace=False):\n", "issue": "Dropout documentation is incorrect\n\"Randomly zeroes some of the elements of the input tensor. The elements to zero are randomized on every forward call.\"\r\n\r\nThis is incorrect; the function also scales up by 1/(1-p), which the implementation correctly does.\n", "before_files": [{"content": "from .module import Module\nfrom .. import functional as F\n\n\nclass Dropout(Module):\n r\"\"\"Randomly zeroes some of the elements of the input tensor.\n The elements to zero are randomized on every forward call.\n\n Args:\n p: probability of an element to be zeroed. Default: 0.5\n inplace: If set to True, will do this operation in-place. Default: false\n\n Shape:\n - Input: `Any`. Input can be of any shape\n - Output: `Same`. 
Output is of the same shape as input\n\n Examples::\n\n >>> m = nn.Dropout(p=0.2)\n >>> input = autograd.Variable(torch.randn(20, 16))\n >>> output = m(input)\n \"\"\"\n\n def __init__(self, p=0.5, inplace=False):\n super(Dropout, self).__init__()\n if p < 0 or p > 1:\n raise ValueError(\"dropout probability has to be between 0 and 1, \"\n \"but got {}\".format(p))\n self.p = p\n self.inplace = inplace\n\n def forward(self, input):\n return F.dropout(input, self.p, self.training, self.inplace)\n\n def __repr__(self):\n inplace_str = ', inplace' if self.inplace else ''\n return self.__class__.__name__ + ' (' \\\n + 'p = ' + str(self.p) \\\n + inplace_str + ')'\n\n\nclass Dropout2d(Module):\n r\"\"\"Randomly zeroes whole channels of the input tensor.\n The channels to zero-out are randomized on every forward call.\n\n *Usually the input comes from Conv2d modules.*\n\n As described in the paper\n `Efficient Object Localization Using Convolutional Networks`_ ,\n if adjacent pixels within feature maps are strongly correlated\n (as is normally the case in early convolution layers) then iid dropout\n will not regularize the activations and will otherwise just result\n in an effective learning rate decrease.\n\n In this case, :func:`nn.Dropout2d` will help promote independence between\n feature maps and should be used instead.\n\n Args:\n p (float, optional): probability of an element to be zeroed.\n inplace (bool, optional): If set to True, will do this operation in-place\n\n Shape:\n - Input: :math:`(N, C, H, W)`\n - Output: :math:`(N, C, H, W)` (same shape as input)\n\n Examples::\n\n >>> m = nn.Dropout2d(p=0.2)\n >>> input = autograd.Variable(torch.randn(20, 16, 32, 32))\n >>> output = m(input)\n\n .. _Efficient Object Localization Using Convolutional Networks:\n http://arxiv.org/abs/1411.4280\n \"\"\"\n\n def __init__(self, p=0.5, inplace=False):\n super(Dropout2d, self).__init__()\n if p < 0 or p > 1:\n raise ValueError(\"dropout probability has to be between 0 and 1, \"\n \"but got {}\".format(p))\n self.p = p\n self.inplace = inplace\n\n def forward(self, input):\n return self._backend.Dropout2d(self.p, self.training, self.inplace)(input)\n\n def __repr__(self):\n inplace_str = ', inplace' if self.inplace else ''\n return self.__class__.__name__ + ' (' \\\n + 'p=' + str(self.p) \\\n + inplace_str + ')'\n\n\nclass Dropout3d(Module):\n r\"\"\"Randomly zeroes whole channels of the input tensor.\n The channels to zero are randomized on every forward call.\n\n *Usually the input comes from Conv3d modules.*\n\n As described in the paper\n `Efficient Object Localization Using Convolutional Networks`_ ,\n if adjacent pixels within feature maps are strongly correlated\n (as is normally the case in early convolution layers) then iid dropout\n will not regularize the activations and will otherwise just result\n in an effective learning rate decrease.\n\n In this case, :func:`nn.Dropout3d` will help promote independence between\n feature maps and should be used instead.\n\n Args:\n p (float, optional): probability of an element to be zeroed.\n inplace (bool, optional): If set to True, will do this operation in-place\n\n Shape:\n - Input: :math:`(N, C, D, H, W)`\n - Output: :math:`(N, C, D, H, W)` (same shape as input)\n\n Examples::\n\n >>> m = nn.Dropout3d(p=0.2)\n >>> input = autograd.Variable(torch.randn(20, 16, 4, 32, 32))\n >>> output = m(input)\n\n .. 
_Efficient Object Localization Using Convolutional Networks:\n http://arxiv.org/abs/1411.4280\n \"\"\"\n\n def __init__(self, p=0.5, inplace=False):\n super(Dropout3d, self).__init__()\n if p < 0 or p > 1:\n raise ValueError(\"dropout probability has to be between 0 and 1, \"\n \"but got {}\".format(p))\n self.p = p\n self.inplace = inplace\n\n def forward(self, input):\n return self._backend.Dropout3d(self.p, self.training, self.inplace)(input)\n\n def __repr__(self):\n inplace_str = ', inplace' if self.inplace else ''\n return self.__class__.__name__ + ' (' \\\n + 'p=' + str(self.p) \\\n + inplace_str + ')'\n", "path": "torch/nn/modules/dropout.py"}]} | 2,184 | 366 |
gh_patches_debug_746 | rasdani/github-patches | git_diff | microsoft__botbuilder-python-1507 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Python 81.skills-skilldialog throwing error: [on_turn_error] unhandled error: Cannot deserialize content-type: text/plain
## Sample information
1. Sample type: \samples\
2. Sample language: python
3. Sample name: 81.skills-skilldialog
## Describe the bug
When you run the sample as per the instructions, the skill bot is throwing the following error:
======== Running on http://localhost:39783 ========
(Press CTRL+C to quit)
[on_turn_error] unhandled error: Cannot deserialize content-type: text/plain
Traceback (most recent call last):
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/core/bot_adapter.py", line 128, in run_pipeline
context, callback
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/core/middleware_set.py", line 69, in receive_activity_with_status
return await self.receive_activity_internal(context, callback)
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/core/middleware_set.py", line 79, in receive_activity_internal
return await callback(context)
File "/Users/tim/Documents/Sourcetree/BotBuilderSamples/samples/python/81.skills-skilldialog/dialog-skill-bot/bots/skill_bot.py", line 21, in on_turn
self._conversation_state.create_property("DialogState"),
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/dialogs/dialog_extensions.py", line 68, in run_dialog
result = await dialog_context.begin_dialog(dialog.id)
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/dialogs/dialog_context.py", line 91, in begin_dialog
return await dialog.begin_dialog(self, options)
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/dialogs/component_dialog.py", line 67, in begin_dialog
turn_result = await self.on_begin_dialog(inner_dc, options)
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/dialogs/component_dialog.py", line 221, in on_begin_dialog
return await inner_dc.begin_dialog(self.initial_dialog_id, options)
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/dialogs/dialog_context.py", line 91, in begin_dialog
return await dialog.begin_dialog(self, options)
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/dialogs/waterfall_dialog.py", line 65, in begin_dialog
return await self.run_step(dialog_context, 0, DialogReason.BeginCalled, None)
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/dialogs/waterfall_dialog.py", line 156, in run_step
return await self.on_step(step_context)
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/dialogs/waterfall_dialog.py", line 132, in on_step
return await self._steps[step_context.index](step_context)
File "/Users/tim/Documents/Sourcetree/BotBuilderSamples/samples/python/81.skills-skilldialog/dialog-skill-bot/dialogs/activity_router_dialog.py", line 50, in process_activity
return await self._on_event_activity(step_context)
File "/Users/tim/Documents/Sourcetree/BotBuilderSamples/samples/python/81.skills-skilldialog/dialog-skill-bot/dialogs/activity_router_dialog.py", line 77, in _on_event_activity
return await self._begin_get_weather(step_context)
File "/Users/tim/Documents/Sourcetree/BotBuilderSamples/samples/python/81.skills-skilldialog/dialog-skill-bot/dialogs/activity_router_dialog.py", line 156, in _begin_get_weather
get_weather_message, get_weather_message, InputHints.ignoring_input,
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/core/turn_context.py", line 174, in send_activity
result = await self.send_activities([activity_or_text])
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/core/turn_context.py", line 226, in send_activities
return await self._emit(self._on_send_activities, output, logic())
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/core/turn_context.py", line 304, in _emit
return await logic
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/core/turn_context.py", line 221, in logic
responses = await self.adapter.send_activities(self, output)
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/core/bot_framework_adapter.py", line 729, in send_activities
raise error
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/core/bot_framework_adapter.py", line 715, in send_activities
activity.conversation.id, activity.reply_to_id, activity
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botframework/connector/aio/operations_async/_conversations_operations_async.py", line 529, in reply_to_activity
request, stream=False, **operation_config
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/msrest/async_client.py", line 115, in async_send
pipeline_response = await self.config.pipeline.run(request, **kwargs)
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/msrest/pipeline/async_abc.py", line 159, in run
return await first_node.send(pipeline_request, **kwargs) # type: ignore
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/msrest/pipeline/async_abc.py", line 79, in send
response = await self.next.send(request, **kwargs) # type: ignore
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/msrest/pipeline/async_requests.py", line 106, in send
return await self.next.send(request, **kwargs)
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/msrest/pipeline/async_abc.py", line 84, in send
self._policy.on_response(request, response, **kwargs)
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/msrest/pipeline/universal.py", line 252, in on_response
http_response.headers
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/msrest/pipeline/universal.py", line 226, in deserialize_from_http_generics
return cls.deserialize_from_text(body_bytes, content_type)
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/msrest/pipeline/universal.py", line 203, in deserialize_from_text
raise DeserializationError("Cannot deserialize content-type: {}".format(content_type))
msrest.exceptions.DeserializationError: Cannot deserialize content-type: text/plain
## To Reproduce
Steps to reproduce the behavior:
1. Run the root & skill bots as per the instructions from the sample readme
2. Start the bot framework emulator & connect
3. Choose the DialogSkillBot
4. Enter activity 3
## Expected behavior
Error not returned
</issue>
<code>
[start of libraries/botbuilder-integration-aiohttp/setup.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 import os
5 from setuptools import setup
6
7 VERSION = os.environ["packageVersion"] if "packageVersion" in os.environ else "4.12.0"
8 REQUIRES = [
9 "botbuilder-schema==4.12.0",
10 "botframework-connector==4.12.0",
11 "botbuilder-core==4.12.0",
12 "aiohttp==3.6.2",
13 ]
14
15 root = os.path.abspath(os.path.dirname(__file__))
16
17 with open(os.path.join(root, "botbuilder", "integration", "aiohttp", "about.py")) as f:
18 package_info = {}
19 info = f.read()
20 exec(info, package_info)
21
22 with open(os.path.join(root, "README.rst"), encoding="utf-8") as f:
23 long_description = f.read()
24
25 setup(
26 name=package_info["__title__"],
27 version=package_info["__version__"],
28 url=package_info["__uri__"],
29 author=package_info["__author__"],
30 description=package_info["__description__"],
31 keywords=[
32 "BotBuilderIntegrationAiohttp",
33 "bots",
34 "ai",
35 "botframework",
36 "botbuilder",
37 ],
38 long_description=long_description,
39 long_description_content_type="text/x-rst",
40 license=package_info["__license__"],
41 packages=[
42 "botbuilder.integration.aiohttp",
43 "botbuilder.integration.aiohttp.skills",
44 ],
45 install_requires=REQUIRES,
46 classifiers=[
47 "Programming Language :: Python :: 3.7",
48 "Intended Audience :: Developers",
49 "License :: OSI Approved :: MIT License",
50 "Operating System :: OS Independent",
51 "Development Status :: 5 - Production/Stable",
52 "Topic :: Scientific/Engineering :: Artificial Intelligence",
53 ],
54 )
55
[end of libraries/botbuilder-integration-aiohttp/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/libraries/botbuilder-integration-aiohttp/setup.py b/libraries/botbuilder-integration-aiohttp/setup.py
--- a/libraries/botbuilder-integration-aiohttp/setup.py
+++ b/libraries/botbuilder-integration-aiohttp/setup.py
@@ -9,6 +9,7 @@
"botbuilder-schema==4.12.0",
"botframework-connector==4.12.0",
"botbuilder-core==4.12.0",
+ "yarl<=1.4.2",
"aiohttp==3.6.2",
]
| {"golden_diff": "diff --git a/libraries/botbuilder-integration-aiohttp/setup.py b/libraries/botbuilder-integration-aiohttp/setup.py\n--- a/libraries/botbuilder-integration-aiohttp/setup.py\n+++ b/libraries/botbuilder-integration-aiohttp/setup.py\n@@ -9,6 +9,7 @@\n \"botbuilder-schema==4.12.0\",\n \"botframework-connector==4.12.0\",\n \"botbuilder-core==4.12.0\",\n+ \"yarl<=1.4.2\",\n \"aiohttp==3.6.2\",\n ]\n", "issue": "Python 81.skills-skilldialog throwing error: [on_turn_error] unhandled error: Cannot deserialize content-type: text/plain\n## Sample information\r\n1. Sample type: \\samples\\\r\n2. Sample language: python\r\n3. Sample name: 81.skills-skilldialog\r\n\r\n## Describe the bug\r\nWhen you run the sample as per the instructions, the skill bot is throwing the following error:\r\n\r\n======== Running on http://localhost:39783 ========\r\n(Press CTRL+C to quit)\r\n\r\n [on_turn_error] unhandled error: Cannot deserialize content-type: text/plain\r\nTraceback (most recent call last):\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/core/bot_adapter.py\", line 128, in run_pipeline\r\n context, callback\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/core/middleware_set.py\", line 69, in receive_activity_with_status\r\n return await self.receive_activity_internal(context, callback)\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/core/middleware_set.py\", line 79, in receive_activity_internal\r\n return await callback(context)\r\n File \"/Users/tim/Documents/Sourcetree/BotBuilderSamples/samples/python/81.skills-skilldialog/dialog-skill-bot/bots/skill_bot.py\", line 21, in on_turn\r\n self._conversation_state.create_property(\"DialogState\"),\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/dialogs/dialog_extensions.py\", line 68, in run_dialog\r\n result = await dialog_context.begin_dialog(dialog.id)\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/dialogs/dialog_context.py\", line 91, in begin_dialog\r\n return await dialog.begin_dialog(self, options)\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/dialogs/component_dialog.py\", line 67, in begin_dialog\r\n turn_result = await self.on_begin_dialog(inner_dc, options)\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/dialogs/component_dialog.py\", line 221, in on_begin_dialog\r\n return await inner_dc.begin_dialog(self.initial_dialog_id, options)\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/dialogs/dialog_context.py\", line 91, in begin_dialog\r\n return await dialog.begin_dialog(self, options)\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/dialogs/waterfall_dialog.py\", line 65, in begin_dialog\r\n return await self.run_step(dialog_context, 0, DialogReason.BeginCalled, None)\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/dialogs/waterfall_dialog.py\", line 156, in run_step\r\n return await self.on_step(step_context)\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/dialogs/waterfall_dialog.py\", line 132, in on_step\r\n return await self._steps[step_context.index](step_context)\r\n File \"/Users/tim/Documents/Sourcetree/BotBuilderSamples/samples/python/81.skills-skilldialog/dialog-skill-bot/dialogs/activity_router_dialog.py\", line 50, in 
process_activity\r\n return await self._on_event_activity(step_context)\r\n File \"/Users/tim/Documents/Sourcetree/BotBuilderSamples/samples/python/81.skills-skilldialog/dialog-skill-bot/dialogs/activity_router_dialog.py\", line 77, in _on_event_activity\r\n return await self._begin_get_weather(step_context)\r\n File \"/Users/tim/Documents/Sourcetree/BotBuilderSamples/samples/python/81.skills-skilldialog/dialog-skill-bot/dialogs/activity_router_dialog.py\", line 156, in _begin_get_weather\r\n get_weather_message, get_weather_message, InputHints.ignoring_input,\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/core/turn_context.py\", line 174, in send_activity\r\n result = await self.send_activities([activity_or_text])\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/core/turn_context.py\", line 226, in send_activities\r\n return await self._emit(self._on_send_activities, output, logic())\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/core/turn_context.py\", line 304, in _emit\r\n return await logic\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/core/turn_context.py\", line 221, in logic\r\n responses = await self.adapter.send_activities(self, output)\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/core/bot_framework_adapter.py\", line 729, in send_activities\r\n raise error\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/core/bot_framework_adapter.py\", line 715, in send_activities\r\n activity.conversation.id, activity.reply_to_id, activity\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botframework/connector/aio/operations_async/_conversations_operations_async.py\", line 529, in reply_to_activity\r\n request, stream=False, **operation_config\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/msrest/async_client.py\", line 115, in async_send\r\n pipeline_response = await self.config.pipeline.run(request, **kwargs)\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/msrest/pipeline/async_abc.py\", line 159, in run\r\n return await first_node.send(pipeline_request, **kwargs) # type: ignore\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/msrest/pipeline/async_abc.py\", line 79, in send\r\n response = await self.next.send(request, **kwargs) # type: ignore\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/msrest/pipeline/async_requests.py\", line 106, in send\r\n return await self.next.send(request, **kwargs)\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/msrest/pipeline/async_abc.py\", line 84, in send\r\n self._policy.on_response(request, response, **kwargs)\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/msrest/pipeline/universal.py\", line 252, in on_response\r\n http_response.headers\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/msrest/pipeline/universal.py\", line 226, in deserialize_from_http_generics\r\n return cls.deserialize_from_text(body_bytes, content_type)\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/msrest/pipeline/universal.py\", line 203, in deserialize_from_text\r\n raise DeserializationError(\"Cannot deserialize content-type: {}\".format(content_type))\r\nmsrest.exceptions.DeserializationError: Cannot deserialize content-type: 
text/plain\r\n\r\n\r\n\r\n## To Reproduce\r\nSteps to reproduce the behavior:\r\n1. Run the root & skill bots as per the instructions from the sample readme\r\n2. Start the bot framework emulator & connect\r\n3. Choose the DialogSkillBot\r\n4. Enter activity 3\r\n\r\n## Expected behavior\r\nError not returned\r\n\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nVERSION = os.environ[\"packageVersion\"] if \"packageVersion\" in os.environ else \"4.12.0\"\nREQUIRES = [\n \"botbuilder-schema==4.12.0\",\n \"botframework-connector==4.12.0\",\n \"botbuilder-core==4.12.0\",\n \"aiohttp==3.6.2\",\n]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"botbuilder\", \"integration\", \"aiohttp\", \"about.py\")) as f:\n package_info = {}\n info = f.read()\n exec(info, package_info)\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=package_info[\"__title__\"],\n version=package_info[\"__version__\"],\n url=package_info[\"__uri__\"],\n author=package_info[\"__author__\"],\n description=package_info[\"__description__\"],\n keywords=[\n \"BotBuilderIntegrationAiohttp\",\n \"bots\",\n \"ai\",\n \"botframework\",\n \"botbuilder\",\n ],\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=package_info[\"__license__\"],\n packages=[\n \"botbuilder.integration.aiohttp\",\n \"botbuilder.integration.aiohttp.skills\",\n ],\n install_requires=REQUIRES,\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botbuilder-integration-aiohttp/setup.py"}]} | 2,907 | 135 |
gh_patches_debug_4999 | rasdani/github-patches | git_diff | encode__starlette-434 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Path prefix for PATH_INFO with WSGIMiddleware
When mounting an app (e.g. Django) through `WSGIMiddleware` it seems to be required to also adjust `PATH_INFO` accordingly.
With the following, Django would e.g. redirect to "/admin/" (without the outer "/dj" prefix).
```python
djapp = django.core.wsgi.get_wsgi_application()
app.mount("/dj", app=WSGIMiddleware(djapp))
```
I've got this working through an additional "path_prefix" argument for WSGIMiddleware, but I think it makes sense to either prepend the mount path in `build_environ` always (using `"PATH_INFO": scope["root_path"] + scope["path"]`, and making it visible to the mounted app though), or handle e.g. "Location" response headers by prepending it there.
The latter could likely result in redirect-loops, and does not help with Django POSTing to "/admin/login" then.
I've not investigated too much into this yet, what do you think?
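A rough sketch of the first option (a hypothetical helper, assuming the router strips the mount prefix from `scope["path"]` and records it in `scope["root_path"]`, and that `build_environ` is importable as in the module shown below):

```python
from starlette.middleware.wsgi import build_environ

def build_environ_with_prefix(scope: dict, body: bytes) -> dict:
    # Hypothetical variant: surface the mount prefix to the WSGI app by
    # prepending it to PATH_INFO instead of dropping it.
    environ = build_environ(scope, body)
    environ["PATH_INFO"] = scope.get("root_path", "") + scope["path"]
    return environ
```

WSGI also reserves `SCRIPT_NAME` for a mount prefix, so populating that field from `scope["root_path"]` would be another option to weigh here.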
</issue>
<code>
[start of starlette/middleware/wsgi.py]
1 import asyncio
2 import io
3 import sys
4 import typing
5
6 from starlette.concurrency import run_in_threadpool
7 from starlette.types import ASGIInstance, Message, Receive, Scope, Send
8
9
10 def build_environ(scope: Scope, body: bytes) -> dict:
11 """
12 Builds a scope and request body into a WSGI environ object.
13 """
14 environ = {
15 "REQUEST_METHOD": scope["method"],
16 "SCRIPT_NAME": "",
17 "PATH_INFO": scope["path"],
18 "QUERY_STRING": scope["query_string"].decode("ascii"),
19 "SERVER_PROTOCOL": f"HTTP/{scope['http_version']}",
20 "wsgi.version": (1, 0),
21 "wsgi.url_scheme": scope.get("scheme", "http"),
22 "wsgi.input": io.BytesIO(body),
23 "wsgi.errors": sys.stdout,
24 "wsgi.multithread": True,
25 "wsgi.multiprocess": True,
26 "wsgi.run_once": False,
27 }
28
29 # Get server name and port - required in WSGI, not in ASGI
30 server = scope.get("server") or ("localhost", 80)
31 environ["SERVER_NAME"] = server[0]
32 environ["SERVER_PORT"] = server[1]
33
34 # Get client IP address
35 if scope.get("client"):
36 environ["REMOTE_ADDR"] = scope["client"][0]
37
38 # Go through headers and make them into environ entries
39 for name, value in scope.get("headers", []):
40 name = name.decode("latin1")
41 if name == "content-length":
42 corrected_name = "CONTENT_LENGTH"
43 elif name == "content-type":
44 corrected_name = "CONTENT_TYPE"
45 else:
46 corrected_name = f"HTTP_{name}".upper().replace("-", "_")
47 # HTTPbis say only ASCII chars are allowed in headers, but we latin1 just in case
48 value = value.decode("latin1")
49 if corrected_name in environ:
50 value = environ[corrected_name] + "," + value
51 environ[corrected_name] = value
52 return environ
53
54
55 class WSGIMiddleware:
56 def __init__(self, app: typing.Callable, workers: int = 10) -> None:
57 self.app = app
58
59 def __call__(self, scope: Scope) -> ASGIInstance:
60 assert scope["type"] == "http"
61 return WSGIResponder(self.app, scope)
62
63
64 class WSGIResponder:
65 def __init__(self, app: typing.Callable, scope: Scope) -> None:
66 self.app = app
67 self.scope = scope
68 self.status = None
69 self.response_headers = None
70 self.send_event = asyncio.Event()
71 self.send_queue = [] # type: typing.List[typing.Optional[Message]]
72 self.loop = asyncio.get_event_loop()
73 self.response_started = False
74 self.exc_info = None # type: typing.Any
75
76 async def __call__(self, receive: Receive, send: Send) -> None:
77 body = b""
78 more_body = True
79 while more_body:
80 message = await receive()
81 body += message.get("body", b"")
82 more_body = message.get("more_body", False)
83 environ = build_environ(self.scope, body)
84 try:
85 sender = self.loop.create_task(self.sender(send))
86 await run_in_threadpool(self.wsgi, environ, self.start_response)
87 self.send_queue.append(None)
88 self.send_event.set()
89 await asyncio.wait_for(sender, None)
90 if self.exc_info is not None:
91 raise self.exc_info[0].with_traceback(
92 self.exc_info[1], self.exc_info[2]
93 )
94 finally:
95 if not sender.done():
96 sender.cancel() # pragma: no cover
97
98 async def sender(self, send: Send) -> None:
99 while True:
100 if self.send_queue:
101 message = self.send_queue.pop(0)
102 if message is None:
103 return
104 await send(message)
105 else:
106 await self.send_event.wait()
107 self.send_event.clear()
108
109 def start_response(
110 self,
111 status: str,
112 response_headers: typing.List[typing.Tuple[str, str]],
113 exc_info: typing.Any = None,
114 ) -> None:
115 self.exc_info = exc_info
116 if not self.response_started:
117 self.response_started = True
118 status_code_string, _ = status.split(" ", 1)
119 status_code = int(status_code_string)
120 headers = [
121 (name.encode("ascii"), value.encode("ascii"))
122 for name, value in response_headers
123 ]
124 self.send_queue.append(
125 {
126 "type": "http.response.start",
127 "status": status_code,
128 "headers": headers,
129 }
130 )
131 self.loop.call_soon_threadsafe(self.send_event.set)
132
133 def wsgi(self, environ: dict, start_response: typing.Callable) -> None:
134 for chunk in self.app(environ, start_response):
135 self.send_queue.append(
136 {"type": "http.response.body", "body": chunk, "more_body": True}
137 )
138 self.loop.call_soon_threadsafe(self.send_event.set)
139
140 self.send_queue.append({"type": "http.response.body", "body": b""})
141 self.loop.call_soon_threadsafe(self.send_event.set)
142
[end of starlette/middleware/wsgi.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/starlette/middleware/wsgi.py b/starlette/middleware/wsgi.py
--- a/starlette/middleware/wsgi.py
+++ b/starlette/middleware/wsgi.py
@@ -13,7 +13,7 @@
"""
environ = {
"REQUEST_METHOD": scope["method"],
- "SCRIPT_NAME": "",
+ "SCRIPT_NAME": scope.get("root_path", ""),
"PATH_INFO": scope["path"],
"QUERY_STRING": scope["query_string"].decode("ascii"),
"SERVER_PROTOCOL": f"HTTP/{scope['http_version']}",
| {"golden_diff": "diff --git a/starlette/middleware/wsgi.py b/starlette/middleware/wsgi.py\n--- a/starlette/middleware/wsgi.py\n+++ b/starlette/middleware/wsgi.py\n@@ -13,7 +13,7 @@\n \"\"\"\n environ = {\n \"REQUEST_METHOD\": scope[\"method\"],\n- \"SCRIPT_NAME\": \"\",\n+ \"SCRIPT_NAME\": scope.get(\"root_path\", \"\"),\n \"PATH_INFO\": scope[\"path\"],\n \"QUERY_STRING\": scope[\"query_string\"].decode(\"ascii\"),\n \"SERVER_PROTOCOL\": f\"HTTP/{scope['http_version']}\",\n", "issue": "Path prefix for PATH_INFO with WSGIMiddleware\nWhen mounting an app (e.g. Django) through `WSGIMiddleware` it seems to be required to also adjust `PATH_INFO` accordingly.\r\n\r\nWith the following, Django would e.g. redirect to \"/admin/\" (without the outer \"/dj\" prefix).\r\n\r\n```python\r\ndjapp = django.core.wsgi.get_wsgi_application()\r\napp.mount(\"/dj\", app=WSGIMiddleware(djapp))\r\n```\r\n\r\nI've got this working through an additional \"path_prefix\" argument for WSGIMiddleware, but I think it makes sense to either prepend the mount path in `build_environ` always (using `\"PATH_INFO\": scope[\"root_path\"] + scope[\"path\"]`, and making it visible to the mounted app though), or handle e.g. \"Location\" response headers by prepending it there.\r\nThe latter could likely result in redirect-loops, and does not help with Django POSTing to \"/admin/login\" then.\r\n\r\nI've not investigated too much into this yet, what do you think?\n", "before_files": [{"content": "import asyncio\nimport io\nimport sys\nimport typing\n\nfrom starlette.concurrency import run_in_threadpool\nfrom starlette.types import ASGIInstance, Message, Receive, Scope, Send\n\n\ndef build_environ(scope: Scope, body: bytes) -> dict:\n \"\"\"\n Builds a scope and request body into a WSGI environ object.\n \"\"\"\n environ = {\n \"REQUEST_METHOD\": scope[\"method\"],\n \"SCRIPT_NAME\": \"\",\n \"PATH_INFO\": scope[\"path\"],\n \"QUERY_STRING\": scope[\"query_string\"].decode(\"ascii\"),\n \"SERVER_PROTOCOL\": f\"HTTP/{scope['http_version']}\",\n \"wsgi.version\": (1, 0),\n \"wsgi.url_scheme\": scope.get(\"scheme\", \"http\"),\n \"wsgi.input\": io.BytesIO(body),\n \"wsgi.errors\": sys.stdout,\n \"wsgi.multithread\": True,\n \"wsgi.multiprocess\": True,\n \"wsgi.run_once\": False,\n }\n\n # Get server name and port - required in WSGI, not in ASGI\n server = scope.get(\"server\") or (\"localhost\", 80)\n environ[\"SERVER_NAME\"] = server[0]\n environ[\"SERVER_PORT\"] = server[1]\n\n # Get client IP address\n if scope.get(\"client\"):\n environ[\"REMOTE_ADDR\"] = scope[\"client\"][0]\n\n # Go through headers and make them into environ entries\n for name, value in scope.get(\"headers\", []):\n name = name.decode(\"latin1\")\n if name == \"content-length\":\n corrected_name = \"CONTENT_LENGTH\"\n elif name == \"content-type\":\n corrected_name = \"CONTENT_TYPE\"\n else:\n corrected_name = f\"HTTP_{name}\".upper().replace(\"-\", \"_\")\n # HTTPbis say only ASCII chars are allowed in headers, but we latin1 just in case\n value = value.decode(\"latin1\")\n if corrected_name in environ:\n value = environ[corrected_name] + \",\" + value\n environ[corrected_name] = value\n return environ\n\n\nclass WSGIMiddleware:\n def __init__(self, app: typing.Callable, workers: int = 10) -> None:\n self.app = app\n\n def __call__(self, scope: Scope) -> ASGIInstance:\n assert scope[\"type\"] == \"http\"\n return WSGIResponder(self.app, scope)\n\n\nclass WSGIResponder:\n def __init__(self, app: typing.Callable, scope: Scope) -> None:\n self.app = app\n 
self.scope = scope\n self.status = None\n self.response_headers = None\n self.send_event = asyncio.Event()\n self.send_queue = [] # type: typing.List[typing.Optional[Message]]\n self.loop = asyncio.get_event_loop()\n self.response_started = False\n self.exc_info = None # type: typing.Any\n\n async def __call__(self, receive: Receive, send: Send) -> None:\n body = b\"\"\n more_body = True\n while more_body:\n message = await receive()\n body += message.get(\"body\", b\"\")\n more_body = message.get(\"more_body\", False)\n environ = build_environ(self.scope, body)\n try:\n sender = self.loop.create_task(self.sender(send))\n await run_in_threadpool(self.wsgi, environ, self.start_response)\n self.send_queue.append(None)\n self.send_event.set()\n await asyncio.wait_for(sender, None)\n if self.exc_info is not None:\n raise self.exc_info[0].with_traceback(\n self.exc_info[1], self.exc_info[2]\n )\n finally:\n if not sender.done():\n sender.cancel() # pragma: no cover\n\n async def sender(self, send: Send) -> None:\n while True:\n if self.send_queue:\n message = self.send_queue.pop(0)\n if message is None:\n return\n await send(message)\n else:\n await self.send_event.wait()\n self.send_event.clear()\n\n def start_response(\n self,\n status: str,\n response_headers: typing.List[typing.Tuple[str, str]],\n exc_info: typing.Any = None,\n ) -> None:\n self.exc_info = exc_info\n if not self.response_started:\n self.response_started = True\n status_code_string, _ = status.split(\" \", 1)\n status_code = int(status_code_string)\n headers = [\n (name.encode(\"ascii\"), value.encode(\"ascii\"))\n for name, value in response_headers\n ]\n self.send_queue.append(\n {\n \"type\": \"http.response.start\",\n \"status\": status_code,\n \"headers\": headers,\n }\n )\n self.loop.call_soon_threadsafe(self.send_event.set)\n\n def wsgi(self, environ: dict, start_response: typing.Callable) -> None:\n for chunk in self.app(environ, start_response):\n self.send_queue.append(\n {\"type\": \"http.response.body\", \"body\": chunk, \"more_body\": True}\n )\n self.loop.call_soon_threadsafe(self.send_event.set)\n\n self.send_queue.append({\"type\": \"http.response.body\", \"body\": b\"\"})\n self.loop.call_soon_threadsafe(self.send_event.set)\n", "path": "starlette/middleware/wsgi.py"}]} | 2,225 | 125 |
gh_patches_debug_22474 | rasdani/github-patches | git_diff | tobymao__sqlglot-3385 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Can't parse `trim` in TrinoSQL
**Fully reproducible code snippet**
Please include a fully reproducible code snippet or the input sql, dialect, and expected output.
```python
import sqlglot
print(sqlglot.__version__)
sql = "SELECT trim(',' FROM some_col);"
result = sqlglot.parse(sql, read="trino")
print(repr(result))
```
Expected:
```
23.12.2
[Select(
expressions=[
Trim(
this=Column(
this=Identifier(this=some_col, quoted=False)),
expression=Literal(this=,, is_string=True))])]
```
Got:
```
23.12.2
Traceback (most recent call last):
File "proof.py", line 7, in <module>
result = sqlglot.parse(sql, read="trino")
File ".../python3.8/site-packages/sqlglot/__init__.py", line 102, in parse
return Dialect.get_or_raise(read or dialect).parse(sql, **opts)
File ".../python3.8/site-packages/sqlglot/dialects/dialect.py", line 506, in parse
return self.parser(**opts).parse(self.tokenize(sql), sql)
File ".../python3.8/site-packages/sqlglot/parser.py", line 1175, in parse
return self._parse(
File ".../python3.8/site-packages/sqlglot/parser.py", line 1241, in _parse
expressions.append(parse_method(self))
File ".../python3.8/site-packages/sqlglot/parser.py", line 1476, in _parse_statement
expression = self._parse_set_operations(expression) if expression else self._parse_select()
File ".../python3.8/site-packages/sqlglot/parser.py", line 2532, in _parse_select
projections = self._parse_projections()
File ".../python3.8/site-packages/sqlglot/parser.py", line 2480, in _parse_projections
return self._parse_expressions()
File ".../python3.8/site-packages/sqlglot/parser.py", line 5695, in _parse_expressions
return self._parse_csv(self._parse_expression)
File ".../python3.8/site-packages/sqlglot/parser.py", line 5649, in _parse_csv
parse_result = parse_method()
File ".../python3.8/site-packages/sqlglot/parser.py", line 3805, in _parse_expression
return self._parse_alias(self._parse_conjunction())
File ".../python3.8/site-packages/sqlglot/parser.py", line 3808, in _parse_conjunction
return self._parse_tokens(self._parse_equality, self.CONJUNCTION)
File ".../python3.8/site-packages/sqlglot/parser.py", line 5663, in _parse_tokens
this = parse_method()
File ".../python3.8/site-packages/sqlglot/parser.py", line 3811, in _parse_equality
return self._parse_tokens(self._parse_comparison, self.EQUALITY)
File ".../python3.8/site-packages/sqlglot/parser.py", line 5663, in _parse_tokens
this = parse_method()
File ".../python3.8/site-packages/sqlglot/parser.py", line 3814, in _parse_comparison
return self._parse_tokens(self._parse_range, self.COMPARISON)
File ".../python3.8/site-packages/sqlglot/parser.py", line 5663, in _parse_tokens
this = parse_method()
File ".../python3.8/site-packages/sqlglot/parser.py", line 3817, in _parse_range
this = this or self._parse_bitwise()
File ".../python3.8/site-packages/sqlglot/parser.py", line 3941, in _parse_bitwise
this = self._parse_term()
File ".../python3.8/site-packages/sqlglot/parser.py", line 3973, in _parse_term
return self._parse_tokens(self._parse_factor, self.TERM)
File ".../python3.8/site-packages/sqlglot/parser.py", line 5663, in _parse_tokens
this = parse_method()
File ".../python3.8/site-packages/sqlglot/parser.py", line 3977, in _parse_factor
this = parse_method()
File ".../python3.8/site-packages/sqlglot/parser.py", line 3998, in _parse_unary
return self._parse_at_time_zone(self._parse_type())
File ".../python3.8/site-packages/sqlglot/parser.py", line 4020, in _parse_type
this = self._parse_column()
File ".../python3.8/site-packages/sqlglot/parser.py", line 4220, in _parse_column
this = self._parse_column_reference()
File ".../python3.8/site-packages/sqlglot/parser.py", line 4224, in _parse_column_reference
this = self._parse_field()
File ".../python3.8/site-packages/sqlglot/parser.py", line 4347, in _parse_field
field = self._parse_primary() or self._parse_function(
File ".../python3.8/site-packages/sqlglot/parser.py", line 4370, in _parse_function
func = self._parse_function_call(
File ".../python3.8/site-packages/sqlglot/parser.py", line 4458, in _parse_function_call
self._match_r_paren(this)
File ".../python3.8/site-packages/sqlglot/parser.py", line 6196, in _match_r_paren
self.raise_error("Expecting )")
File ".../python3.8/site-packages/sqlglot/parser.py", line 1285, in raise_error
raise error
sqlglot.errors.ParseError: Expecting ). Line 1, Col: 20.
SELECT trim(',' FROM some_col);
```
**Official Documentation**
https://trino.io/docs/current/functions/string.html?highlight=trim#trim
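One quick check that may help narrow this down (an assumption to verify, since behaviour can differ between releases) is whether the dialect-neutral parser accepts the same statement:

```python
import sqlglot

# If this parses, the gap is specific to the Trino/Presto dialect rather
# than to the TRIM(chars FROM col) form itself.
print(repr(sqlglot.parse_one("SELECT trim(',' FROM some_col)")))
```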
</issue>
<code>
[start of sqlglot/dialects/trino.py]
1 from __future__ import annotations
2
3 from sqlglot import exp
4 from sqlglot.dialects.dialect import merge_without_target_sql
5 from sqlglot.dialects.presto import Presto
6
7
8 class Trino(Presto):
9 SUPPORTS_USER_DEFINED_TYPES = False
10 LOG_BASE_FIRST = True
11
12 class Generator(Presto.Generator):
13 TRANSFORMS = {
14 **Presto.Generator.TRANSFORMS,
15 exp.ArraySum: lambda self,
16 e: f"REDUCE({self.sql(e, 'this')}, 0, (acc, x) -> acc + x, acc -> acc)",
17 exp.Merge: merge_without_target_sql,
18 }
19
20 SUPPORTED_JSON_PATH_PARTS = {
21 exp.JSONPathKey,
22 exp.JSONPathRoot,
23 exp.JSONPathSubscript,
24 }
25
26 class Tokenizer(Presto.Tokenizer):
27 HEX_STRINGS = [("X'", "'")]
28
[end of sqlglot/dialects/trino.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sqlglot/dialects/trino.py b/sqlglot/dialects/trino.py
--- a/sqlglot/dialects/trino.py
+++ b/sqlglot/dialects/trino.py
@@ -1,7 +1,7 @@
from __future__ import annotations
from sqlglot import exp
-from sqlglot.dialects.dialect import merge_without_target_sql
+from sqlglot.dialects.dialect import merge_without_target_sql, trim_sql
from sqlglot.dialects.presto import Presto
@@ -9,12 +9,19 @@
SUPPORTS_USER_DEFINED_TYPES = False
LOG_BASE_FIRST = True
+ class Parser(Presto.Parser):
+ FUNCTION_PARSERS = {
+ **Presto.Parser.FUNCTION_PARSERS,
+ "TRIM": lambda self: self._parse_trim(),
+ }
+
class Generator(Presto.Generator):
TRANSFORMS = {
**Presto.Generator.TRANSFORMS,
exp.ArraySum: lambda self,
e: f"REDUCE({self.sql(e, 'this')}, 0, (acc, x) -> acc + x, acc -> acc)",
exp.Merge: merge_without_target_sql,
+ exp.Trim: trim_sql,
}
SUPPORTED_JSON_PATH_PARTS = {
| {"golden_diff": "diff --git a/sqlglot/dialects/trino.py b/sqlglot/dialects/trino.py\n--- a/sqlglot/dialects/trino.py\n+++ b/sqlglot/dialects/trino.py\n@@ -1,7 +1,7 @@\n from __future__ import annotations\n \n from sqlglot import exp\n-from sqlglot.dialects.dialect import merge_without_target_sql\n+from sqlglot.dialects.dialect import merge_without_target_sql, trim_sql\n from sqlglot.dialects.presto import Presto\n \n \n@@ -9,12 +9,19 @@\n SUPPORTS_USER_DEFINED_TYPES = False\n LOG_BASE_FIRST = True\n \n+ class Parser(Presto.Parser):\n+ FUNCTION_PARSERS = {\n+ **Presto.Parser.FUNCTION_PARSERS,\n+ \"TRIM\": lambda self: self._parse_trim(),\n+ }\n+\n class Generator(Presto.Generator):\n TRANSFORMS = {\n **Presto.Generator.TRANSFORMS,\n exp.ArraySum: lambda self,\n e: f\"REDUCE({self.sql(e, 'this')}, 0, (acc, x) -> acc + x, acc -> acc)\",\n exp.Merge: merge_without_target_sql,\n+ exp.Trim: trim_sql,\n }\n \n SUPPORTED_JSON_PATH_PARTS = {\n", "issue": "Can't parse `trim` in TrinoSQL\n**Fully reproducible code snippet**\r\nPlease include a fully reproducible code snippet or the input sql, dialect, and expected output.\r\n```python\r\nimport sqlglot\r\nprint(sqlglot.__version__)\r\nsql = \"SELECT trim(',' FROM some_col);\"\r\nresult = sqlglot.parse(sql, read=\"trino\")\r\nprint(repr(result))\r\n```\r\n\r\nExpected:\r\n```\r\n23.12.2\r\n[Select(\r\n expressions=[\r\n Trim(\r\n this=Column(\r\n this=Identifier(this=some_col, quoted=False)),\r\n expression=Literal(this=,, is_string=True))])]\r\n```\r\n\r\nGot:\r\n```\r\n23.12.2\r\nTraceback (most recent call last):\r\n File \"proof.py\", line 7, in <module>\r\n result = sqlglot.parse(sql, read=\"trino\")\r\n File \".../python3.8/site-packages/sqlglot/__init__.py\", line 102, in parse\r\n return Dialect.get_or_raise(read or dialect).parse(sql, **opts)\r\n File \".../python3.8/site-packages/sqlglot/dialects/dialect.py\", line 506, in parse\r\n return self.parser(**opts).parse(self.tokenize(sql), sql)\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 1175, in parse\r\n return self._parse(\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 1241, in _parse\r\n expressions.append(parse_method(self))\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 1476, in _parse_statement\r\n expression = self._parse_set_operations(expression) if expression else self._parse_select()\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 2532, in _parse_select\r\n projections = self._parse_projections()\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 2480, in _parse_projections\r\n return self._parse_expressions()\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 5695, in _parse_expressions\r\n return self._parse_csv(self._parse_expression)\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 5649, in _parse_csv\r\n parse_result = parse_method()\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 3805, in _parse_expression\r\n return self._parse_alias(self._parse_conjunction())\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 3808, in _parse_conjunction\r\n return self._parse_tokens(self._parse_equality, self.CONJUNCTION)\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 5663, in _parse_tokens\r\n this = parse_method()\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 3811, in _parse_equality\r\n return self._parse_tokens(self._parse_comparison, self.EQUALITY)\r\n File 
\".../python3.8/site-packages/sqlglot/parser.py\", line 5663, in _parse_tokens\r\n this = parse_method()\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 3814, in _parse_comparison\r\n return self._parse_tokens(self._parse_range, self.COMPARISON)\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 5663, in _parse_tokens\r\n this = parse_method()\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 3817, in _parse_range\r\n this = this or self._parse_bitwise()\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 3941, in _parse_bitwise\r\n this = self._parse_term()\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 3973, in _parse_term\r\n return self._parse_tokens(self._parse_factor, self.TERM)\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 5663, in _parse_tokens\r\n this = parse_method()\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 3977, in _parse_factor\r\n this = parse_method()\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 3998, in _parse_unary\r\n return self._parse_at_time_zone(self._parse_type())\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 4020, in _parse_type\r\n this = self._parse_column()\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 4220, in _parse_column\r\n this = self._parse_column_reference()\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 4224, in _parse_column_reference\r\n this = self._parse_field()\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 4347, in _parse_field\r\n field = self._parse_primary() or self._parse_function(\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 4370, in _parse_function\r\n func = self._parse_function_call(\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 4458, in _parse_function_call\r\n self._match_r_paren(this)\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 6196, in _match_r_paren\r\n self.raise_error(\"Expecting )\")\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 1285, in raise_error\r\n raise error\r\nsqlglot.errors.ParseError: Expecting ). Line 1, Col: 20.\r\n SELECT trim(',' FROM some_col);\r\n```\r\n\r\n**Official Documentation**\r\nhttps://trino.io/docs/current/functions/string.html?highlight=trim#trim\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom sqlglot import exp\nfrom sqlglot.dialects.dialect import merge_without_target_sql\nfrom sqlglot.dialects.presto import Presto\n\n\nclass Trino(Presto):\n SUPPORTS_USER_DEFINED_TYPES = False\n LOG_BASE_FIRST = True\n\n class Generator(Presto.Generator):\n TRANSFORMS = {\n **Presto.Generator.TRANSFORMS,\n exp.ArraySum: lambda self,\n e: f\"REDUCE({self.sql(e, 'this')}, 0, (acc, x) -> acc + x, acc -> acc)\",\n exp.Merge: merge_without_target_sql,\n }\n\n SUPPORTED_JSON_PATH_PARTS = {\n exp.JSONPathKey,\n exp.JSONPathRoot,\n exp.JSONPathSubscript,\n }\n\n class Tokenizer(Presto.Tokenizer):\n HEX_STRINGS = [(\"X'\", \"'\")]\n", "path": "sqlglot/dialects/trino.py"}]} | 2,179 | 294 |
gh_patches_debug_37696 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-1514 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Moving a foreign key column throws up an error.
## Description
I'm not able to extract a set of columns where one of the columns is a foreign key (FK). Specifically, this is step 18 of our library workflow. I get an unhandled exception:
> Foreign key associated with column 'Publications.Authors_id' could not find table 'Library.Authors' with which to generate a foreign key to target column 'id'
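For reference, this is the class of error SQLAlchemy raises whenever a `ForeignKey` points at a table that is not registered in the `MetaData` used to emit DDL. A minimal sketch of my own (not Mathesar code — the table names are made up and the schema is omitted):

```python
from sqlalchemy import Column, ForeignKey, Integer, MetaData, Table, create_engine

engine = create_engine("sqlite://")
metadata = MetaData()  # fresh MetaData that has never seen an "Authors" table

publications = Table(
    "Publications",
    metadata,
    Column("id", Integer, primary_key=True),
    Column("Authors_id", Integer, ForeignKey("Authors.id")),
)

# Raises NoReferencedTableError: "Foreign key associated with column
# 'Publications.Authors_id' could not find table 'Authors' with which to
# generate a foreign key to target column 'id'"
publications.create(engine)
```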
</issue>
<code>
[start of db/tables/operations/create.py]
1 from sqlalchemy import Column, TEXT, Table, MetaData
2 from sqlalchemy.ext import compiler
3 from sqlalchemy.schema import DDLElement
4
5 from db.columns.utils import init_mathesar_table_column_list_with_defaults
6 from db.schemas.operations.create import create_schema
7
8
9 def create_mathesar_table(name, schema, columns, engine, metadata=None):
10 """
11 This method creates a Postgres table in the specified schema using the
12 given name and column list. It adds internal mathesar columns to the
13 table.
14 """
15 columns = init_mathesar_table_column_list_with_defaults(columns)
16 create_schema(schema, engine)
17 # We need this so that we can create multiple mathesar tables in the
18 # same MetaData, enabling them to reference each other in the
19 # SQLAlchemy context (e.g., for creating a ForeignKey relationship)
20 if metadata is None:
21 metadata = MetaData(bind=engine, schema=schema)
22 table = Table(
23 name,
24 metadata,
25 *columns,
26 schema=schema
27 )
28 table.create(engine)
29 return table
30
31
32 def create_string_column_table(name, schema, column_names, engine):
33 """
34 This method creates a Postgres table in the specified schema, with all
35 columns being String type.
36 """
37 columns_ = [Column(name=column_name, type_=TEXT) for column_name in column_names]
38 table = create_mathesar_table(name, schema, columns_, engine)
39 return table
40
41
42 class CreateTableAs(DDLElement):
43 def __init__(self, name, selectable):
44 self.name = name
45 self.selectable = selectable
46
47
48 @compiler.compiles(CreateTableAs)
49 def compile_create_table_as(element, compiler, **_):
50 return "CREATE TABLE %s AS (%s)" % (
51 element.name,
52 compiler.sql_compiler.process(element.selectable, literal_binds=True),
53 )
54
[end of db/tables/operations/create.py]
[start of mathesar/api/serializers/tables.py]
1 from django.urls import reverse
2 from psycopg2.errors import DuplicateTable
3 from rest_framework import serializers, status
4 from rest_framework.exceptions import ValidationError
5 from sqlalchemy.exc import ProgrammingError
6
7 from db.types.operations.convert import get_db_type_enum_from_id
8
9 from mathesar.api.exceptions.validation_exceptions.exceptions import (
10 ColumnSizeMismatchAPIException, DistinctColumnRequiredAPIException,
11 MultipleDataFileAPIException, UnknownDatabaseTypeIdentifier,
12 )
13 from mathesar.api.exceptions.database_exceptions.exceptions import DuplicateTableAPIException
14 from mathesar.api.exceptions.database_exceptions.base_exceptions import ProgrammingAPIException
15 from mathesar.api.exceptions.validation_exceptions import base_exceptions as base_validation_exceptions
16 from mathesar.api.exceptions.generic_exceptions import base_exceptions as base_api_exceptions
17 from mathesar.api.exceptions.mixins import MathesarErrorMessageMixin
18 from mathesar.api.serializers.columns import SimpleColumnSerializer
19 from mathesar.models.base import Column, Table, DataFile
20 from mathesar.utils.tables import gen_table_name, create_table_from_datafile, create_empty_table
21
22
23 class TableSerializer(MathesarErrorMessageMixin, serializers.ModelSerializer):
24 columns = SimpleColumnSerializer(many=True, required=False)
25 records_url = serializers.SerializerMethodField()
26 constraints_url = serializers.SerializerMethodField()
27 columns_url = serializers.SerializerMethodField()
28 joinable_tables_url = serializers.SerializerMethodField()
29 type_suggestions_url = serializers.SerializerMethodField()
30 previews_url = serializers.SerializerMethodField()
31 name = serializers.CharField(required=False, allow_blank=True, default='')
32 import_target = serializers.PrimaryKeyRelatedField(
33 required=False, allow_null=True, queryset=Table.current_objects.all()
34 )
35 data_files = serializers.PrimaryKeyRelatedField(
36 required=False, many=True, queryset=DataFile.objects.all()
37 )
38
39 class Meta:
40 model = Table
41 fields = ['id', 'name', 'import_target', 'schema', 'created_at', 'updated_at', 'import_verified',
42 'columns', 'records_url', 'constraints_url', 'columns_url',
43 'joinable_tables_url', 'type_suggestions_url', 'previews_url',
44 'data_files', 'has_dependencies']
45
46 def get_records_url(self, obj):
47 if isinstance(obj, Table):
48 # Only get records if we are serializing an existing table
49 request = self.context['request']
50 return request.build_absolute_uri(reverse('table-record-list', kwargs={'table_pk': obj.pk}))
51 else:
52 return None
53
54 def get_constraints_url(self, obj):
55 if isinstance(obj, Table):
56 # Only get constraints if we are serializing an existing table
57 request = self.context['request']
58 return request.build_absolute_uri(reverse('table-constraint-list', kwargs={'table_pk': obj.pk}))
59 else:
60 return None
61
62 def get_columns_url(self, obj):
63 if isinstance(obj, Table):
64 # Only get columns if we are serializing an existing table
65 request = self.context['request']
66 return request.build_absolute_uri(reverse('table-column-list', kwargs={'table_pk': obj.pk}))
67 else:
68 return None
69
70 def get_joinable_tables_url(self, obj):
71 if isinstance(obj, Table):
72 # Only get type suggestions if we are serializing an existing table
73 request = self.context['request']
74 return request.build_absolute_uri(reverse('table-joinable-tables', kwargs={'pk': obj.pk}))
75 else:
76 return None
77
78 def get_type_suggestions_url(self, obj):
79 if isinstance(obj, Table):
80 # Only get type suggestions if we are serializing an existing table
81 request = self.context['request']
82 return request.build_absolute_uri(reverse('table-type-suggestions', kwargs={'pk': obj.pk}))
83 else:
84 return None
85
86 def get_previews_url(self, obj):
87 if isinstance(obj, Table):
88 # Only get previews if we are serializing an existing table
89 request = self.context['request']
90 return request.build_absolute_uri(reverse('table-previews', kwargs={'pk': obj.pk}))
91 else:
92 return None
93
94 def validate_data_files(self, data_files):
95 if data_files and len(data_files) > 1:
96 raise MultipleDataFileAPIException()
97 return data_files
98
99 def create(self, validated_data):
100 schema = validated_data['schema']
101 data_files = validated_data.get('data_files')
102 name = validated_data.get('name') or gen_table_name(schema, data_files)
103 import_target = validated_data.get('import_target', None)
104
105 try:
106 if data_files:
107 table = create_table_from_datafile(data_files, name, schema)
108 if import_target:
109 table.import_target = import_target
110 table.is_temp = True
111 table.save()
112 else:
113 table = create_empty_table(name, schema)
114 except ProgrammingError as e:
115 if type(e.orig) == DuplicateTable:
116 raise DuplicateTableAPIException(
117 e,
118 message=f"Relation {validated_data['name']} already exists in schema {schema.id}",
119 field="name",
120 status_code=status.HTTP_400_BAD_REQUEST
121 )
122 else:
123 raise ProgrammingAPIException(e)
124 return table
125
126 def update(self, instance, validated_data):
127 if self.partial:
128 # Save the fields that are stored in the model.
129 present_model_fields = []
130 for model_field in instance.MODEL_FIELDS:
131 if model_field in validated_data:
132 setattr(instance, model_field, validated_data[model_field])
133 present_model_fields.append(model_field)
134 instance.save(update_fields=present_model_fields)
135 for key in present_model_fields:
136 del validated_data[key]
137 # Save the fields that are stored in the underlying DB.
138 try:
139 instance.update_sa_table(validated_data)
140 except ValueError as e:
141 raise base_api_exceptions.ValueAPIException(e, status_code=status.HTTP_400_BAD_REQUEST)
142 return instance
143
144 def validate(self, data):
145 if self.partial:
146 columns = data.get('columns', None)
147 if columns is not None:
148 for col in columns:
149 id = col.get('id', None)
150 if id is None:
151 message = "'id' field is required while batch updating columns."
152 raise base_validation_exceptions.MathesarValidationException(ValidationError, message=message)
153 return data
154
155
156 class TablePreviewSerializer(MathesarErrorMessageMixin, serializers.Serializer):
157 name = serializers.CharField(required=False)
158 columns = SimpleColumnSerializer(many=True)
159
160 def validate_columns(self, columns):
161 table = self.context['table']
162 column_names = [col["name"] for col in columns]
163 if not len(column_names) == len(set(column_names)):
164 raise DistinctColumnRequiredAPIException()
165 if not len(columns) == len(table.sa_columns):
166 raise ColumnSizeMismatchAPIException()
167 for column in columns:
168 db_type_id = column['type']
169 db_type = get_db_type_enum_from_id(db_type_id)
170 if db_type is None:
171 raise UnknownDatabaseTypeIdentifier(db_type_id=db_type_id)
172 return columns
173
174
175 class MoveTableRequestSerializer(MathesarErrorMessageMixin, serializers.Serializer):
176 move_columns = serializers.PrimaryKeyRelatedField(queryset=Column.current_objects.all(), many=True)
177 target_table = serializers.PrimaryKeyRelatedField(queryset=Table.current_objects.all())
178
179
180 class SplitTableRequestSerializer(MathesarErrorMessageMixin, serializers.Serializer):
181 extract_columns = serializers.PrimaryKeyRelatedField(queryset=Column.current_objects.all(), many=True)
182 extracted_table_name = serializers.CharField()
183
184
185 class SplitTableResponseSerializer(MathesarErrorMessageMixin, serializers.Serializer):
186 extracted_table = serializers.PrimaryKeyRelatedField(queryset=Table.current_objects.all())
187 remainder_table = serializers.PrimaryKeyRelatedField(queryset=Table.current_objects.all())
188
189
190 class MappingSerializer(MathesarErrorMessageMixin, serializers.Serializer):
191 # TBD
192 pass
193
194
195 class TableImportSerializer(MathesarErrorMessageMixin, serializers.Serializer):
196 import_target = serializers.PrimaryKeyRelatedField(queryset=Table.current_objects.all(), required=True)
197 data_files = serializers.PrimaryKeyRelatedField(required=True, many=True, queryset=DataFile.objects.all())
198 mappings = MappingSerializer(required=True, allow_null=True)
199
[end of mathesar/api/serializers/tables.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/db/tables/operations/create.py b/db/tables/operations/create.py
--- a/db/tables/operations/create.py
+++ b/db/tables/operations/create.py
@@ -19,6 +19,11 @@
# SQLAlchemy context (e.g., for creating a ForeignKey relationship)
if metadata is None:
metadata = MetaData(bind=engine, schema=schema)
+ metadata.reflect()
+ # The exception raised by SQLAlchemy upon hitting a duplicate table in the
+ # schema is non-specific.
+ if (name, schema) in [(t.name, t.schema) for t in metadata.sorted_tables]:
+ raise DuplicateTable
table = Table(
name,
metadata,
@@ -29,6 +34,10 @@
return table
+class DuplicateTable(Exception):
+ pass
+
+
def create_string_column_table(name, schema, column_names, engine):
"""
This method creates a Postgres table in the specified schema, with all
diff --git a/mathesar/api/serializers/tables.py b/mathesar/api/serializers/tables.py
--- a/mathesar/api/serializers/tables.py
+++ b/mathesar/api/serializers/tables.py
@@ -1,10 +1,10 @@
from django.urls import reverse
-from psycopg2.errors import DuplicateTable
from rest_framework import serializers, status
from rest_framework.exceptions import ValidationError
from sqlalchemy.exc import ProgrammingError
from db.types.operations.convert import get_db_type_enum_from_id
+from db.tables.operations.create import DuplicateTable
from mathesar.api.exceptions.validation_exceptions.exceptions import (
ColumnSizeMismatchAPIException, DistinctColumnRequiredAPIException,
@@ -111,16 +111,15 @@
table.save()
else:
table = create_empty_table(name, schema)
+ except DuplicateTable as e:
+ raise DuplicateTableAPIException(
+ e,
+ message=f"Relation {validated_data['name']} already exists in schema {schema.id}",
+ field="name",
+ status_code=status.HTTP_400_BAD_REQUEST
+ )
except ProgrammingError as e:
- if type(e.orig) == DuplicateTable:
- raise DuplicateTableAPIException(
- e,
- message=f"Relation {validated_data['name']} already exists in schema {schema.id}",
- field="name",
- status_code=status.HTTP_400_BAD_REQUEST
- )
- else:
- raise ProgrammingAPIException(e)
+ raise ProgrammingAPIException(e)
return table
def update(self, instance, validated_data):
| {"golden_diff": "diff --git a/db/tables/operations/create.py b/db/tables/operations/create.py\n--- a/db/tables/operations/create.py\n+++ b/db/tables/operations/create.py\n@@ -19,6 +19,11 @@\n # SQLAlchemy context (e.g., for creating a ForeignKey relationship)\n if metadata is None:\n metadata = MetaData(bind=engine, schema=schema)\n+ metadata.reflect()\n+ # The exception raised by SQLAlchemy upon hitting a duplicate table in the\n+ # schema is non-specific.\n+ if (name, schema) in [(t.name, t.schema) for t in metadata.sorted_tables]:\n+ raise DuplicateTable\n table = Table(\n name,\n metadata,\n@@ -29,6 +34,10 @@\n return table\n \n \n+class DuplicateTable(Exception):\n+ pass\n+\n+\n def create_string_column_table(name, schema, column_names, engine):\n \"\"\"\n This method creates a Postgres table in the specified schema, with all\ndiff --git a/mathesar/api/serializers/tables.py b/mathesar/api/serializers/tables.py\n--- a/mathesar/api/serializers/tables.py\n+++ b/mathesar/api/serializers/tables.py\n@@ -1,10 +1,10 @@\n from django.urls import reverse\n-from psycopg2.errors import DuplicateTable\n from rest_framework import serializers, status\n from rest_framework.exceptions import ValidationError\n from sqlalchemy.exc import ProgrammingError\n \n from db.types.operations.convert import get_db_type_enum_from_id\n+from db.tables.operations.create import DuplicateTable\n \n from mathesar.api.exceptions.validation_exceptions.exceptions import (\n ColumnSizeMismatchAPIException, DistinctColumnRequiredAPIException,\n@@ -111,16 +111,15 @@\n table.save()\n else:\n table = create_empty_table(name, schema)\n+ except DuplicateTable as e:\n+ raise DuplicateTableAPIException(\n+ e,\n+ message=f\"Relation {validated_data['name']} already exists in schema {schema.id}\",\n+ field=\"name\",\n+ status_code=status.HTTP_400_BAD_REQUEST\n+ )\n except ProgrammingError as e:\n- if type(e.orig) == DuplicateTable:\n- raise DuplicateTableAPIException(\n- e,\n- message=f\"Relation {validated_data['name']} already exists in schema {schema.id}\",\n- field=\"name\",\n- status_code=status.HTTP_400_BAD_REQUEST\n- )\n- else:\n- raise ProgrammingAPIException(e)\n+ raise ProgrammingAPIException(e)\n return table\n \n def update(self, instance, validated_data):\n", "issue": "Moving Foreign key column throws up an error. \n## Description\r\nI'm not able to extract a set of columns where one column is a FK. Specifically, this is step 18 on our library workflow. I get an unhandled exception:\r\n\r\n> Foreign key associated with column 'Publications.Authors_id' could not find table 'Library.Authors' with which to generate a foreign key to target column 'id'\r\n\r\n\n", "before_files": [{"content": "from sqlalchemy import Column, TEXT, Table, MetaData\nfrom sqlalchemy.ext import compiler\nfrom sqlalchemy.schema import DDLElement\n\nfrom db.columns.utils import init_mathesar_table_column_list_with_defaults\nfrom db.schemas.operations.create import create_schema\n\n\ndef create_mathesar_table(name, schema, columns, engine, metadata=None):\n \"\"\"\n This method creates a Postgres table in the specified schema using the\n given name and column list. 
It adds internal mathesar columns to the\n table.\n \"\"\"\n columns = init_mathesar_table_column_list_with_defaults(columns)\n create_schema(schema, engine)\n # We need this so that we can create multiple mathesar tables in the\n # same MetaData, enabling them to reference each other in the\n # SQLAlchemy context (e.g., for creating a ForeignKey relationship)\n if metadata is None:\n metadata = MetaData(bind=engine, schema=schema)\n table = Table(\n name,\n metadata,\n *columns,\n schema=schema\n )\n table.create(engine)\n return table\n\n\ndef create_string_column_table(name, schema, column_names, engine):\n \"\"\"\n This method creates a Postgres table in the specified schema, with all\n columns being String type.\n \"\"\"\n columns_ = [Column(name=column_name, type_=TEXT) for column_name in column_names]\n table = create_mathesar_table(name, schema, columns_, engine)\n return table\n\n\nclass CreateTableAs(DDLElement):\n def __init__(self, name, selectable):\n self.name = name\n self.selectable = selectable\n\n\[email protected](CreateTableAs)\ndef compile_create_table_as(element, compiler, **_):\n return \"CREATE TABLE %s AS (%s)\" % (\n element.name,\n compiler.sql_compiler.process(element.selectable, literal_binds=True),\n )\n", "path": "db/tables/operations/create.py"}, {"content": "from django.urls import reverse\nfrom psycopg2.errors import DuplicateTable\nfrom rest_framework import serializers, status\nfrom rest_framework.exceptions import ValidationError\nfrom sqlalchemy.exc import ProgrammingError\n\nfrom db.types.operations.convert import get_db_type_enum_from_id\n\nfrom mathesar.api.exceptions.validation_exceptions.exceptions import (\n ColumnSizeMismatchAPIException, DistinctColumnRequiredAPIException,\n MultipleDataFileAPIException, UnknownDatabaseTypeIdentifier,\n)\nfrom mathesar.api.exceptions.database_exceptions.exceptions import DuplicateTableAPIException\nfrom mathesar.api.exceptions.database_exceptions.base_exceptions import ProgrammingAPIException\nfrom mathesar.api.exceptions.validation_exceptions import base_exceptions as base_validation_exceptions\nfrom mathesar.api.exceptions.generic_exceptions import base_exceptions as base_api_exceptions\nfrom mathesar.api.exceptions.mixins import MathesarErrorMessageMixin\nfrom mathesar.api.serializers.columns import SimpleColumnSerializer\nfrom mathesar.models.base import Column, Table, DataFile\nfrom mathesar.utils.tables import gen_table_name, create_table_from_datafile, create_empty_table\n\n\nclass TableSerializer(MathesarErrorMessageMixin, serializers.ModelSerializer):\n columns = SimpleColumnSerializer(many=True, required=False)\n records_url = serializers.SerializerMethodField()\n constraints_url = serializers.SerializerMethodField()\n columns_url = serializers.SerializerMethodField()\n joinable_tables_url = serializers.SerializerMethodField()\n type_suggestions_url = serializers.SerializerMethodField()\n previews_url = serializers.SerializerMethodField()\n name = serializers.CharField(required=False, allow_blank=True, default='')\n import_target = serializers.PrimaryKeyRelatedField(\n required=False, allow_null=True, queryset=Table.current_objects.all()\n )\n data_files = serializers.PrimaryKeyRelatedField(\n required=False, many=True, queryset=DataFile.objects.all()\n )\n\n class Meta:\n model = Table\n fields = ['id', 'name', 'import_target', 'schema', 'created_at', 'updated_at', 'import_verified',\n 'columns', 'records_url', 'constraints_url', 'columns_url',\n 'joinable_tables_url', 'type_suggestions_url', 
'previews_url',\n 'data_files', 'has_dependencies']\n\n def get_records_url(self, obj):\n if isinstance(obj, Table):\n # Only get records if we are serializing an existing table\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-record-list', kwargs={'table_pk': obj.pk}))\n else:\n return None\n\n def get_constraints_url(self, obj):\n if isinstance(obj, Table):\n # Only get constraints if we are serializing an existing table\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-constraint-list', kwargs={'table_pk': obj.pk}))\n else:\n return None\n\n def get_columns_url(self, obj):\n if isinstance(obj, Table):\n # Only get columns if we are serializing an existing table\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-column-list', kwargs={'table_pk': obj.pk}))\n else:\n return None\n\n def get_joinable_tables_url(self, obj):\n if isinstance(obj, Table):\n # Only get type suggestions if we are serializing an existing table\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-joinable-tables', kwargs={'pk': obj.pk}))\n else:\n return None\n\n def get_type_suggestions_url(self, obj):\n if isinstance(obj, Table):\n # Only get type suggestions if we are serializing an existing table\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-type-suggestions', kwargs={'pk': obj.pk}))\n else:\n return None\n\n def get_previews_url(self, obj):\n if isinstance(obj, Table):\n # Only get previews if we are serializing an existing table\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-previews', kwargs={'pk': obj.pk}))\n else:\n return None\n\n def validate_data_files(self, data_files):\n if data_files and len(data_files) > 1:\n raise MultipleDataFileAPIException()\n return data_files\n\n def create(self, validated_data):\n schema = validated_data['schema']\n data_files = validated_data.get('data_files')\n name = validated_data.get('name') or gen_table_name(schema, data_files)\n import_target = validated_data.get('import_target', None)\n\n try:\n if data_files:\n table = create_table_from_datafile(data_files, name, schema)\n if import_target:\n table.import_target = import_target\n table.is_temp = True\n table.save()\n else:\n table = create_empty_table(name, schema)\n except ProgrammingError as e:\n if type(e.orig) == DuplicateTable:\n raise DuplicateTableAPIException(\n e,\n message=f\"Relation {validated_data['name']} already exists in schema {schema.id}\",\n field=\"name\",\n status_code=status.HTTP_400_BAD_REQUEST\n )\n else:\n raise ProgrammingAPIException(e)\n return table\n\n def update(self, instance, validated_data):\n if self.partial:\n # Save the fields that are stored in the model.\n present_model_fields = []\n for model_field in instance.MODEL_FIELDS:\n if model_field in validated_data:\n setattr(instance, model_field, validated_data[model_field])\n present_model_fields.append(model_field)\n instance.save(update_fields=present_model_fields)\n for key in present_model_fields:\n del validated_data[key]\n # Save the fields that are stored in the underlying DB.\n try:\n instance.update_sa_table(validated_data)\n except ValueError as e:\n raise base_api_exceptions.ValueAPIException(e, status_code=status.HTTP_400_BAD_REQUEST)\n return instance\n\n def validate(self, data):\n if self.partial:\n columns = data.get('columns', None)\n if columns is not None:\n for col in columns:\n id = 
col.get('id', None)\n if id is None:\n message = \"'id' field is required while batch updating columns.\"\n raise base_validation_exceptions.MathesarValidationException(ValidationError, message=message)\n return data\n\n\nclass TablePreviewSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n name = serializers.CharField(required=False)\n columns = SimpleColumnSerializer(many=True)\n\n def validate_columns(self, columns):\n table = self.context['table']\n column_names = [col[\"name\"] for col in columns]\n if not len(column_names) == len(set(column_names)):\n raise DistinctColumnRequiredAPIException()\n if not len(columns) == len(table.sa_columns):\n raise ColumnSizeMismatchAPIException()\n for column in columns:\n db_type_id = column['type']\n db_type = get_db_type_enum_from_id(db_type_id)\n if db_type is None:\n raise UnknownDatabaseTypeIdentifier(db_type_id=db_type_id)\n return columns\n\n\nclass MoveTableRequestSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n move_columns = serializers.PrimaryKeyRelatedField(queryset=Column.current_objects.all(), many=True)\n target_table = serializers.PrimaryKeyRelatedField(queryset=Table.current_objects.all())\n\n\nclass SplitTableRequestSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n extract_columns = serializers.PrimaryKeyRelatedField(queryset=Column.current_objects.all(), many=True)\n extracted_table_name = serializers.CharField()\n\n\nclass SplitTableResponseSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n extracted_table = serializers.PrimaryKeyRelatedField(queryset=Table.current_objects.all())\n remainder_table = serializers.PrimaryKeyRelatedField(queryset=Table.current_objects.all())\n\n\nclass MappingSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n # TBD\n pass\n\n\nclass TableImportSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n import_target = serializers.PrimaryKeyRelatedField(queryset=Table.current_objects.all(), required=True)\n data_files = serializers.PrimaryKeyRelatedField(required=True, many=True, queryset=DataFile.objects.all())\n mappings = MappingSerializer(required=True, allow_null=True)\n", "path": "mathesar/api/serializers/tables.py"}]} | 3,329 | 561 |
gh_patches_debug_6902 | rasdani/github-patches | git_diff | napari__napari-3929 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Missing doc items (may be a copy-docs problem)
## 🐛 Bug
I was looking at napari.org after seeing #3916 get merged (a big update to plugin docs), and it looks like the toc is missing items. This seems to have something to do with the copy-docs script.
In particular,
* it looks like files that are not in the toc are missing
* some items in the toc go missing
* the order of the items in the toc seems to change from the authored order
@tlambert03 please feel free to add detail
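For reference, the copy-docs script quoted below rewrites a JupyterBook `_toc.yml`; its `update_toc` helper assumes the parsed structure looks roughly like this (a hypothetical sketch — the file names are invented, and only `toc[1]["chapters"]` plus each chapter's `sections` list are actually touched):

```python
# Rough shape of yaml.safe_load("_toc.yml") that update_toc() works on
# (hypothetical file names; the real ones live in the napari.github.io repo).
toc = [
    {"file": "index"},          # toc[0] is never modified
    {
        "chapters": [
            {
                "file": "guides/stable/index",
                "sections": [
                    {"file": "guides/stable/configuration"},
                ],
            },
        ],
    },
]
```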
</issue>
<code>
[start of docs/copy-docs.py]
1 """
2 Copy docs from the napari repo into the napari.github.io repo
3 and update the table of contents.
4 By default, will assume that there is a folder named napari.github.io
5 in the same directory as the napari folder, if not a different copy
6 destination can be provided.
7
8 Read ORGANIZATION.md to learn more about how the documentation sources
9 are organized, and how everything comes together.
10
11 python -m copy-docs [dstdir]
12 """
13
14 import copy
15 import os
16 import os.path as osp
17 import shutil
18 import sys
19 from fnmatch import fnmatch
20
21 import yaml
22
23 # path to copy and locations to copy to if different
24 TO_COPY = [
25 'ORGANIZATION.md',
26 'glossary.md',
27 'developers',
28 'community',
29 'howtos',
30 'release',
31 'roadmaps',
32 'images',
33 osp.join('_templates', 'autosummary'),
34 *[
35 (dire, osp.join(dire, 'stable'))
36 for dire in ('api', 'guides', 'plugins')
37 ],
38 ]
39
40 # paths to ignore
41 IGNORE = [
42 osp.join('images', 'logo.png'),
43 ]
44
45 SRC = osp.dirname(__file__)
46
47 DOC_EXTS = ['.md', '.rst', '.ipynb']
48
49 TOC_IGNORE = [
50 'api/stable',
51 'images',
52 '_templates',
53 'ORGANIZATION.md',
54 'glossary.md', # this document will still be at the destination ToC
55 'guides/stable/_layer_events.md',
56 'guides/stable/_viewer_events.md',
57 ]
58
59
60 def exclude_filter(path):
61 """Exclude files in the ignore list and duplicated files."""
62 for ignore in IGNORE:
63 if fnmatch(path, osp.join(SRC, ignore)): # in ignore list
64 return True
65 else:
66 if osp.isdir(path) or osp.splitext(path)[1] != '.md':
67 return False
68 with open(path) as f:
69 firstline = f.readline()
70 return firstline.startswith('```{include}') # duplicate file
71
72
73 def copy_path(srcdir, dstdir, path, newpath=None, *, exclude=None):
74 """Copy a path from the source directory to the destination directory,
75 with the given path relative to the directory roots.
76
77 Parameters
78 ----------
79 srcdir : path-like
80 Source directory root to copy from.
81 dstdir : path-like
82 Destination directory root to copy to.
83 path : path-like
84 Path relative to the `srcdir` of the path to copy from.
85 newpath : path-like, optional
86 Path relative to the `dstdir` of the path to copy to.
87 If not provided, will default to the value of `path`.
88 exclude : function(path-like) -> bool, keyword-only, optional
89 Conditional function on whether to exclude the given path.
90
91 Returns
92 -------
93 files : list of path-like
94 Paths of the copied files.
95 """
96 if newpath is None:
97 newpath = path
98
99 src = osp.join(srcdir, path)
100 dst = osp.join(dstdir, newpath)
101
102 if exclude(src): # skip this path
103 return []
104
105 print(f'copying {src} to {dst}')
106
107 if osp.isfile(src):
108 shutil.copyfile(src, dst)
109 return [newpath]
110 elif osp.isdir(src):
111 if osp.exists(dst): # if the destination directory exists, delete it
112 shutil.rmtree(dst)
113
114 os.mkdir(dst)
115
116 files = []
117
118 for fpath in os.listdir(src): # recursively copy each child path
119 p = osp.join(path, fpath)
120 np = osp.join(newpath, fpath)
121 files += copy_path(srcdir, dstdir, p, np, exclude=exclude)
122
123 return files
124 else:
125 raise RuntimeError(f'unknown path type {src}')
126
127
128 def copy_paths(src, dst, paths, *, exclude=None):
129 """Copy files/directories given a list of their paths from
130 the source directory to the destination directory.
131
132 Parameters
133 ----------
134 src : path-like
135 Source directory to copy from.
136 dst : path-like
137 Destination directory to copy to.
138 paths : list of (path-like or 2-tuple of path-like)
139 Paths of the files/directories to copy relative to the source directory.
140 Pairs of paths in the list signify that the path to copy to is different
141 than the path copied from.
142 exclude : function(path-like) -> bool, keyword-only, optional
143 Conditional function on whether to exclude the given path.
144
145 Returns
146 -------
147 files : list of path-like
148 Paths of the copied files.
149 """
150 files = []
151
152 for path in paths:
153 if isinstance(path, tuple):
154 files += copy_path(src, dst, path[0], path[1], exclude=exclude)
155 else:
156 files += copy_path(src, dst, path, exclude=exclude)
157
158 return files
159
160
161 def update_toc(toc, paths, ignore=[]):
162 """Update the table of contents according to the paths of all files copied over.
163
164 Parameters
165 ----------
166 toc : JSON
167 Table of contents according to the JupyterBook specification.
168 paths : list of path-like
169 Paths of the files copied over.
170 ignore : list of path-like
171 List of directories to ignore when updating the table of contents.
172
173 Returns
174 -------
175 new_toc : JSON
176 Updated table of contents.
177 """
178 new_toc = copy.deepcopy(toc)
179
180 remaining_paths = []
181
182 # remove all paths in ignore list and those with the wrong extension
183 for path in paths:
184 base, ext = osp.splitext(path)
185
186 for prefix in ignore: # check if path should be ignored
187 if path.startswith(prefix):
188 break
189 else: # not on the ignore list
190 if ext in DOC_EXTS: # relevant filetype
191 remaining_paths.append(
192 base
193 ) # the toc does not include extensions
194
195 chapters = new_toc[1]['chapters']
196
197 for chapter in chapters:
198 if (
199 'file' not in chapter
200 or (index := chapter['file']) not in remaining_paths
201 ):
202 continue # skip irrelevant chapters
203
204 parent_dir = osp.dirname(index)
205 remaining_paths.remove(index)
206
207 sections = chapter['sections']
208 files = [section['file'] for section in sections]
209
210 # find and remove deleted files from toc
211 j = 0
212 for path in files:
213 if path in remaining_paths:
214 remaining_paths.remove(path)
215 j += 1
216 else:
217 print(f'deleting {path} from toc')
218 del sections[j] # delete from toc
219
220 new_files = filter(
221 lambda path: path.startswith(parent_dir), remaining_paths
222 )
223 for path in new_files:
224 print(f'adding {path} to toc')
225 sections.append({'file': path})
226 remaining_paths.remove(path)
227
228 return new_toc
229
230
231 def main(args):
232 dst = osp.join(
233 osp.dirname(osp.dirname(osp.dirname(__file__))), 'napari.github.io'
234 )
235
236 try:
237 dst = args[1]
238 except IndexError:
239 pass
240
241 files = copy_paths(SRC, dst, TO_COPY, exclude=exclude_filter)
242 toc_file = osp.join(dst, '_toc.yml')
243
244 with open(toc_file) as f:
245 toc = yaml.safe_load(f)
246
247 if toc is None:
248 print(f'toc file {toc_file} empty')
249 return
250
251 new_toc = update_toc(toc, files, TOC_IGNORE)
252
253 with open(toc_file, 'w') as f:
254 yaml.dump(new_toc, f)
255
256
257 if __name__ == '__main__':
258 main(sys.argv)
259
[end of docs/copy-docs.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/copy-docs.py b/docs/copy-docs.py
--- a/docs/copy-docs.py
+++ b/docs/copy-docs.py
@@ -54,6 +54,13 @@
'glossary.md', # this document will still be at the destination ToC
'guides/stable/_layer_events.md',
'guides/stable/_viewer_events.md',
+ 'plugins/stable/_npe2_contributions.md',
+ 'plugins/stable/_npe2_manifest.md',
+ 'plugins/stable/_npe2_readers_guide.md',
+ 'plugins/stable/_npe2_widgets_guide.md',
+ 'plugins/stable/_npe2_writers_guide.md',
+ 'plugins/stable/_npe2_sample_data_guide.md',
+ 'plugins/stable/_layer_data_guide.md',
]
| {"golden_diff": "diff --git a/docs/copy-docs.py b/docs/copy-docs.py\n--- a/docs/copy-docs.py\n+++ b/docs/copy-docs.py\n@@ -54,6 +54,13 @@\n 'glossary.md', # this document will still be at the destination ToC\n 'guides/stable/_layer_events.md',\n 'guides/stable/_viewer_events.md',\n+ 'plugins/stable/_npe2_contributions.md',\n+ 'plugins/stable/_npe2_manifest.md',\n+ 'plugins/stable/_npe2_readers_guide.md',\n+ 'plugins/stable/_npe2_widgets_guide.md',\n+ 'plugins/stable/_npe2_writers_guide.md',\n+ 'plugins/stable/_npe2_sample_data_guide.md',\n+ 'plugins/stable/_layer_data_guide.md',\n ]\n", "issue": "missing doc items (may be copy-docs problem)\n## \ud83d\udc1b Bug\r\n\r\nI was looking at napari.org after seeing #3916 get merged (a big update to plugin docs), and it looks like the toc is missing items. This seems to have something to do with the copy-docs script.\r\n\r\nIn particular, \r\n* it looks like files that are not in the toc are missing\r\n* some items in the toc go missing\r\n* the order of the items in the toc seems to change from the authored order\r\n\r\n@tlambert03 please feel free to add detail\r\n\n", "before_files": [{"content": "\"\"\"\nCopy docs from the napari repo into the napari.github.io repo\nand update the table of contents.\nBy default, will assume that there is a folder named napari.github.io\nin the same directory as the napari folder, if not a different copy\ndestination can be provided.\n\nRead ORGANIZATION.md to learn more about how the documentation sources\nare organized, and how everything comes together.\n\npython -m copy-docs [dstdir]\n\"\"\"\n\nimport copy\nimport os\nimport os.path as osp\nimport shutil\nimport sys\nfrom fnmatch import fnmatch\n\nimport yaml\n\n# path to copy and locations to copy to if different\nTO_COPY = [\n 'ORGANIZATION.md',\n 'glossary.md',\n 'developers',\n 'community',\n 'howtos',\n 'release',\n 'roadmaps',\n 'images',\n osp.join('_templates', 'autosummary'),\n *[\n (dire, osp.join(dire, 'stable'))\n for dire in ('api', 'guides', 'plugins')\n ],\n]\n\n# paths to ignore\nIGNORE = [\n osp.join('images', 'logo.png'),\n]\n\nSRC = osp.dirname(__file__)\n\nDOC_EXTS = ['.md', '.rst', '.ipynb']\n\nTOC_IGNORE = [\n 'api/stable',\n 'images',\n '_templates',\n 'ORGANIZATION.md',\n 'glossary.md', # this document will still be at the destination ToC\n 'guides/stable/_layer_events.md',\n 'guides/stable/_viewer_events.md',\n]\n\n\ndef exclude_filter(path):\n \"\"\"Exclude files in the ignore list and duplicated files.\"\"\"\n for ignore in IGNORE:\n if fnmatch(path, osp.join(SRC, ignore)): # in ignore list\n return True\n else:\n if osp.isdir(path) or osp.splitext(path)[1] != '.md':\n return False\n with open(path) as f:\n firstline = f.readline()\n return firstline.startswith('```{include}') # duplicate file\n\n\ndef copy_path(srcdir, dstdir, path, newpath=None, *, exclude=None):\n \"\"\"Copy a path from the source directory to the destination directory,\n with the given path relative to the directory roots.\n\n Parameters\n ----------\n srcdir : path-like\n Source directory root to copy from.\n dstdir : path-like\n Destination directory root to copy to.\n path : path-like\n Path relative to the `srcdir` of the path to copy from.\n newpath : path-like, optional\n Path relative to the `dstdir` of the path to copy to.\n If not provided, will default to the value of `path`.\n exclude : function(path-like) -> bool, keyword-only, optional\n Conditional function on whether to exclude the given path.\n\n Returns\n -------\n files : list of 
path-like\n Paths of the copied files.\n \"\"\"\n if newpath is None:\n newpath = path\n\n src = osp.join(srcdir, path)\n dst = osp.join(dstdir, newpath)\n\n if exclude(src): # skip this path\n return []\n\n print(f'copying {src} to {dst}')\n\n if osp.isfile(src):\n shutil.copyfile(src, dst)\n return [newpath]\n elif osp.isdir(src):\n if osp.exists(dst): # if the destination directory exists, delete it\n shutil.rmtree(dst)\n\n os.mkdir(dst)\n\n files = []\n\n for fpath in os.listdir(src): # recursively copy each child path\n p = osp.join(path, fpath)\n np = osp.join(newpath, fpath)\n files += copy_path(srcdir, dstdir, p, np, exclude=exclude)\n\n return files\n else:\n raise RuntimeError(f'unknown path type {src}')\n\n\ndef copy_paths(src, dst, paths, *, exclude=None):\n \"\"\"Copy files/directories given a list of their paths from\n the source directory to the destination directory.\n\n Parameters\n ----------\n src : path-like\n Source directory to copy from.\n dst : path-like\n Destination directory to copy to.\n paths : list of (path-like or 2-tuple of path-like)\n Paths of the files/directories to copy relative to the source directory.\n Pairs of paths in the list signify that the path to copy to is different\n than the path copied from.\n exclude : function(path-like) -> bool, keyword-only, optional\n Conditional function on whether to exclude the given path.\n\n Returns\n -------\n files : list of path-like\n Paths of the copied files.\n \"\"\"\n files = []\n\n for path in paths:\n if isinstance(path, tuple):\n files += copy_path(src, dst, path[0], path[1], exclude=exclude)\n else:\n files += copy_path(src, dst, path, exclude=exclude)\n\n return files\n\n\ndef update_toc(toc, paths, ignore=[]):\n \"\"\"Update the table of contents according to the paths of all files copied over.\n\n Parameters\n ----------\n toc : JSON\n Table of contents according to the JupyterBook specification.\n paths : list of path-like\n Paths of the files copied over.\n ignore : list of path-like\n List of directories to ignore when updating the table of contents.\n\n Returns\n -------\n new_toc : JSON\n Updated table of contents.\n \"\"\"\n new_toc = copy.deepcopy(toc)\n\n remaining_paths = []\n\n # remove all paths in ignore list and those with the wrong extension\n for path in paths:\n base, ext = osp.splitext(path)\n\n for prefix in ignore: # check if path should be ignored\n if path.startswith(prefix):\n break\n else: # not on the ignore list\n if ext in DOC_EXTS: # relevant filetype\n remaining_paths.append(\n base\n ) # the toc does not include extensions\n\n chapters = new_toc[1]['chapters']\n\n for chapter in chapters:\n if (\n 'file' not in chapter\n or (index := chapter['file']) not in remaining_paths\n ):\n continue # skip irrelevant chapters\n\n parent_dir = osp.dirname(index)\n remaining_paths.remove(index)\n\n sections = chapter['sections']\n files = [section['file'] for section in sections]\n\n # find and remove deleted files from toc\n j = 0\n for path in files:\n if path in remaining_paths:\n remaining_paths.remove(path)\n j += 1\n else:\n print(f'deleting {path} from toc')\n del sections[j] # delete from toc\n\n new_files = filter(\n lambda path: path.startswith(parent_dir), remaining_paths\n )\n for path in new_files:\n print(f'adding {path} to toc')\n sections.append({'file': path})\n remaining_paths.remove(path)\n\n return new_toc\n\n\ndef main(args):\n dst = osp.join(\n osp.dirname(osp.dirname(osp.dirname(__file__))), 'napari.github.io'\n )\n\n try:\n dst = args[1]\n except 
IndexError:\n pass\n\n files = copy_paths(SRC, dst, TO_COPY, exclude=exclude_filter)\n toc_file = osp.join(dst, '_toc.yml')\n\n with open(toc_file) as f:\n toc = yaml.safe_load(f)\n\n if toc is None:\n print(f'toc file {toc_file} empty')\n return\n\n new_toc = update_toc(toc, files, TOC_IGNORE)\n\n with open(toc_file, 'w') as f:\n yaml.dump(new_toc, f)\n\n\nif __name__ == '__main__':\n main(sys.argv)\n", "path": "docs/copy-docs.py"}]} | 3,035 | 190 |
gh_patches_debug_6777 | rasdani/github-patches | git_diff | nautobot__nautobot-604 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
OPTIONS returns all Status choices instead of ones only related to the specific model
<!--
NOTE: IF YOUR ISSUE DOES NOT FOLLOW THIS TEMPLATE, IT WILL BE CLOSED.
This form is only for reporting reproducible bugs. If you need assistance
with Nautobot installation, or if you have a general question, please start a
discussion instead: https://github.com/nautobot/nautobot/discussions
Please describe the environment in which you are running Nautobot. Be sure
that you are running an unmodified instance of the latest stable release
before submitting a bug report, and that any plugins have been disabled.
-->
### Environment
* Python version: 3.8
* Nautobot version: 1.0.1
<!--
Describe in detail the exact steps that someone else can take to reproduce
this bug using the current stable release of Nautobot. Begin with the
creation of any necessary database objects and call out every operation
being performed explicitly. If reporting a bug in the REST API, be sure to
reconstruct the raw HTTP request(s) being made: Don't rely on a client
library such as pynautobot.
-->
### Steps to Reproduce
Cables should only have three status choices and virtual machines should have six. The exact numbers are arbitrary, but this is with a fresh install of Nautobot.
```python
>>> nb.dcim.cables.choices()['status']
[{'value': 'active', 'display': 'Active'}, {'value': 'available', 'display': 'Available'}, {'value': 'connected', 'display': 'Connected'}, {'value': 'container', 'display': 'Container'}, {'value': 'decommissioned', 'display': 'Decommissioned'}, {'value': 'decommissioning', 'display': 'Decommissioning'}, {'value': 'deprecated', 'display': 'Deprecated'}, {'value': 'deprovisioning', 'display': 'Deprovisioning'}, {'value': 'dhcp', 'display': 'DHCP'}, {'value': 'failed', 'display': 'Failed'}, {'value': 'inventory', 'display': 'Inventory'}, {'value': 'offline', 'display': 'Offline'}, {'value': 'planned', 'display': 'Planned'}, {'value': 'provisioning', 'display': 'Provisioning'}, {'value': 'reserved', 'display': 'Reserved'}, {'value': 'retired', 'display': 'Retired'}, {'value': 'slaac', 'display': 'SLAAC'}, {'value': 'staged', 'display': 'Staged'}, {'value': 'staging', 'display': 'Staging'}]
>>> nb.virtualization.virtual_machines.choices()['status']
[{'value': 'active', 'display': 'Active'}, {'value': 'available', 'display': 'Available'}, {'value': 'connected', 'display': 'Connected'}, {'value': 'container', 'display': 'Container'}, {'value': 'decommissioned', 'display': 'Decommissioned'}, {'value': 'decommissioning', 'display': 'Decommissioning'}, {'value': 'deprecated', 'display': 'Deprecated'}, {'value': 'deprovisioning', 'display': 'Deprovisioning'}, {'value': 'dhcp', 'display': 'DHCP'}, {'value': 'failed', 'display': 'Failed'}, {'value': 'inventory', 'display': 'Inventory'}, {'value': 'offline', 'display': 'Offline'}, {'value': 'planned', 'display': 'Planned'}, {'value': 'provisioning', 'display': 'Provisioning'}, {'value': 'reserved', 'display': 'Reserved'}, {'value': 'retired', 'display': 'Retired'}, {'value': 'slaac', 'display': 'SLAAC'}, {'value': 'staged', 'display': 'Staged'}, {'value': 'staging', 'display': 'Staging'}]
```
I haven't been able to dig into it too much, but I believe this is the culprit.
https://github.com/nautobot/nautobot/blob/develop/nautobot/extras/api/serializers.py#L621
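As a sketch of the behavior I'd expect (my own illustration, not a proposed patch): Nautobot's `Status` queryset can already be scoped to a model, so the serializer field presumably needs to apply that scoping before it builds the choices:

```python
from nautobot.dcim.models import Cable
from nautobot.extras.models import Status

Status.objects.all()                 # every status, which is what OPTIONS returns today
Status.objects.get_for_model(Cable)  # only the statuses whose content_types include Cable
```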
<!-- What did you expect to happen? -->
### Expected Behavior
```python
>>> nb.dcim.cables.choices()['status']
[{'value': 'connected', 'display': 'Connected'}, {'value': 'decommissioning', 'display': 'Decommissioning'}, {'value': 'planned', 'display': 'Planned'}]
>>> nb.virtualization.virtual_machines.choices()['status']
[{'value': 'active', 'display': 'Active'}, {'value': 'decommissioning', 'display': 'Decommissioning'}, {'value': 'failed', 'display': 'Failed'}, {'value': 'offline', 'display': 'Offline'}, {'value': 'planned', 'display': 'Planned'}, {'value': 'staged', 'display': 'Staged'}]
```
<!-- What happened instead? -->
### Observed Behavior
See **Steps to Reproduce**.
</issue>
<code>
[start of nautobot/extras/api/fields.py]
1 from collections import OrderedDict
2
3 from rest_framework import serializers
4
5
6 class MultipleChoiceJSONField(serializers.MultipleChoiceField):
7 """A MultipleChoiceField that renders the received value as a JSON-compatible list rather than a set."""
8
9 def to_internal_value(self, data):
10 set_value = super().to_internal_value(data)
11 return sorted(set_value)
12
13
14 class StatusSerializerField(serializers.SlugRelatedField):
15 """Serializer field for `Status` object fields."""
16
17 show_choices = True
18
19 def __init__(self, **kwargs):
20 kwargs.setdefault("slug_field", "slug")
21 super().__init__(**kwargs)
22
23 def to_representation(self, obj):
24 """Make this field compatible w/ the existing API for `ChoiceField`."""
25 if obj == "":
26 return None
27
28 return OrderedDict(
29 [
30 ("value", obj.slug),
31 ("label", str(obj)),
32 ]
33 )
34
35 def to_internal_value(self, data):
36 """Always lower-case the custom choice value."""
37 if hasattr(data, "lower"):
38 data = data.lower()
39 return super().to_internal_value(data)
40
41 def get_choices(self, cutoff=None):
42 """
43 Return a nested list of dicts for enum choices.
44
45 This had to be overloaded since the base method calls
46 `to_representation()` which in our case is an OrderedDict and can't be
47 nested.
48 """
49 queryset = self.get_queryset()
50 if queryset is None:
51 # Ensure that field.choices returns something sensible
52 # even when accessed with a read-only field.
53 return {}
54
55 if cutoff is not None:
56 queryset = queryset[:cutoff]
57
58 return OrderedDict([(item.slug, self.display_value(item)) for item in queryset])
59
[end of nautobot/extras/api/fields.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nautobot/extras/api/fields.py b/nautobot/extras/api/fields.py
--- a/nautobot/extras/api/fields.py
+++ b/nautobot/extras/api/fields.py
@@ -38,6 +38,12 @@
data = data.lower()
return super().to_internal_value(data)
+ def get_queryset(self):
+ """Only emit status options for this model/field combination."""
+ queryset = super().get_queryset()
+ model = self.parent.Meta.model
+ return queryset.get_for_model(model)
+
def get_choices(self, cutoff=None):
"""
Return a nested list of dicts for enum choices.
| {"golden_diff": "diff --git a/nautobot/extras/api/fields.py b/nautobot/extras/api/fields.py\n--- a/nautobot/extras/api/fields.py\n+++ b/nautobot/extras/api/fields.py\n@@ -38,6 +38,12 @@\n data = data.lower()\n return super().to_internal_value(data)\n \n+ def get_queryset(self):\n+ \"\"\"Only emit status options for this model/field combination.\"\"\"\n+ queryset = super().get_queryset()\n+ model = self.parent.Meta.model\n+ return queryset.get_for_model(model)\n+\n def get_choices(self, cutoff=None):\n \"\"\"\n Return a nested list of dicts for enum choices.\n", "issue": "OPTIONS returns all Status choices instead of ones only related to the specific model\n<!--\r\n NOTE: IF YOUR ISSUE DOES NOT FOLLOW THIS TEMPLATE, IT WILL BE CLOSED.\r\n\r\n This form is only for reporting reproducible bugs. If you need assistance\r\n with Nautobot installation, or if you have a general question, please start a\r\n discussion instead: https://github.com/nautobot/nautobot/discussions\r\n\r\n Please describe the environment in which you are running Nautobot. Be sure\r\n that you are running an unmodified instance of the latest stable release\r\n before submitting a bug report, and that any plugins have been disabled.\r\n-->\r\n### Environment\r\n* Python version: 3.8\r\n* Nautobot version: 1.0.1\r\n\r\n<!--\r\n Describe in detail the exact steps that someone else can take to reproduce\r\n this bug using the current stable release of Nautobot. Begin with the\r\n creation of any necessary database objects and call out every operation\r\n being performed explicitly. If reporting a bug in the REST API, be sure to\r\n reconstruct the raw HTTP request(s) being made: Don't rely on a client\r\n library such as pynautobot.\r\n-->\r\n### Steps to Reproduce\r\nCables should only have three status choices and virtual machines should have 6. 
Numbers are arbitrary, but this is with a fresh install of Nautobot.\r\n\r\n```python\r\n>>> nb.dcim.cables.choices()['status']\r\n[{'value': 'active', 'display': 'Active'}, {'value': 'available', 'display': 'Available'}, {'value': 'connected', 'display': 'Connected'}, {'value': 'container', 'display': 'Container'}, {'value': 'decommissioned', 'display': 'Decommissioned'}, {'value': 'decommissioning', 'display': 'Decommissioning'}, {'value': 'deprecated', 'display': 'Deprecated'}, {'value': 'deprovisioning', 'display': 'Deprovisioning'}, {'value': 'dhcp', 'display': 'DHCP'}, {'value': 'failed', 'display': 'Failed'}, {'value': 'inventory', 'display': 'Inventory'}, {'value': 'offline', 'display': 'Offline'}, {'value': 'planned', 'display': 'Planned'}, {'value': 'provisioning', 'display': 'Provisioning'}, {'value': 'reserved', 'display': 'Reserved'}, {'value': 'retired', 'display': 'Retired'}, {'value': 'slaac', 'display': 'SLAAC'}, {'value': 'staged', 'display': 'Staged'}, {'value': 'staging', 'display': 'Staging'}]\r\n>>> nb.virtualization.virtual_machines.choices()['status']\r\n[{'value': 'active', 'display': 'Active'}, {'value': 'available', 'display': 'Available'}, {'value': 'connected', 'display': 'Connected'}, {'value': 'container', 'display': 'Container'}, {'value': 'decommissioned', 'display': 'Decommissioned'}, {'value': 'decommissioning', 'display': 'Decommissioning'}, {'value': 'deprecated', 'display': 'Deprecated'}, {'value': 'deprovisioning', 'display': 'Deprovisioning'}, {'value': 'dhcp', 'display': 'DHCP'}, {'value': 'failed', 'display': 'Failed'}, {'value': 'inventory', 'display': 'Inventory'}, {'value': 'offline', 'display': 'Offline'}, {'value': 'planned', 'display': 'Planned'}, {'value': 'provisioning', 'display': 'Provisioning'}, {'value': 'reserved', 'display': 'Reserved'}, {'value': 'retired', 'display': 'Retired'}, {'value': 'slaac', 'display': 'SLAAC'}, {'value': 'staged', 'display': 'Staged'}, {'value': 'staging', 'display': 'Staging'}]\r\n```\r\n\r\nI haven't been able to dig into it too much, but I believe this is the culprit.\r\n\r\nhttps://github.com/nautobot/nautobot/blob/develop/nautobot/extras/api/serializers.py#L621\r\n\r\n<!-- What did you expect to happen? -->\r\n### Expected Behavior\r\n\r\n```python\r\n>>> nb.dcim.cables.choices()['status']\r\n[{'value': 'connected', 'display': 'Connected'}, {'value': 'decommissioning', 'display': 'Decommissioning'}, {'value': 'planned', 'display': 'Planned'}]\r\n>>> nb.virtualization.virtual_machines.choices()['status']\r\n[{'value': 'active', 'display': 'Active'}, {'value': 'decommissioning', 'display': 'Decommissioning'}, {'value': 'failed', 'display': 'Failed'}, {'value': 'offline', 'display': 'Offline'}, {'value': 'planned', 'display': 'Planned'}, {'value': 'staged', 'display': 'Staged'}]\r\n```\r\n\r\n<!-- What happened instead? 
-->\r\n### Observed Behavior\r\nSee **Steps to Reproduce**.\n", "before_files": [{"content": "from collections import OrderedDict\n\nfrom rest_framework import serializers\n\n\nclass MultipleChoiceJSONField(serializers.MultipleChoiceField):\n \"\"\"A MultipleChoiceField that renders the received value as a JSON-compatible list rather than a set.\"\"\"\n\n def to_internal_value(self, data):\n set_value = super().to_internal_value(data)\n return sorted(set_value)\n\n\nclass StatusSerializerField(serializers.SlugRelatedField):\n \"\"\"Serializer field for `Status` object fields.\"\"\"\n\n show_choices = True\n\n def __init__(self, **kwargs):\n kwargs.setdefault(\"slug_field\", \"slug\")\n super().__init__(**kwargs)\n\n def to_representation(self, obj):\n \"\"\"Make this field compatible w/ the existing API for `ChoiceField`.\"\"\"\n if obj == \"\":\n return None\n\n return OrderedDict(\n [\n (\"value\", obj.slug),\n (\"label\", str(obj)),\n ]\n )\n\n def to_internal_value(self, data):\n \"\"\"Always lower-case the custom choice value.\"\"\"\n if hasattr(data, \"lower\"):\n data = data.lower()\n return super().to_internal_value(data)\n\n def get_choices(self, cutoff=None):\n \"\"\"\n Return a nested list of dicts for enum choices.\n\n This had to be overloaded since the base method calls\n `to_representation()` which in our case is an OrderedDict and can't be\n nested.\n \"\"\"\n queryset = self.get_queryset()\n if queryset is None:\n # Ensure that field.choices returns something sensible\n # even when accessed with a read-only field.\n return {}\n\n if cutoff is not None:\n queryset = queryset[:cutoff]\n\n return OrderedDict([(item.slug, self.display_value(item)) for item in queryset])\n", "path": "nautobot/extras/api/fields.py"}]} | 2,089 | 148 |
gh_patches_debug_19871 | rasdani/github-patches | git_diff | ManimCommunity__manim-1516 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove references to t_min and t_max in ParametricFunction
## Description of bug / unexpected behavior
With PR #1161 the old way of setting the parameter's range (e.g. by setting `t_min` and `t_max`) was replaced by a single parameter `t_range`. However, the docs still show usage of `t_min` and `t_max`.
</issue>
<code>
[start of manim/mobject/functions.py]
1 """Mobjects representing function graphs."""
2
3 __all__ = ["ParametricFunction", "FunctionGraph"]
4
5
6 import numpy as np
7
8 from .. import config
9 from ..constants import *
10 from ..mobject.types.vectorized_mobject import VMobject
11 from ..utils.color import YELLOW
12
13
14 class ParametricFunction(VMobject):
15 """A parametric curve.
16
17 Examples
18 --------
19
20 .. manim:: PlotParametricFunction
21 :save_last_frame:
22
23 class PlotParametricFunction(Scene):
24 def func(self, t):
25 return np.array((np.sin(2 * t), np.sin(3 * t), 0))
26
27 def construct(self):
28 func = ParametricFunction(self.func, t_max = TAU, fill_opacity=0).set_color(RED)
29 self.add(func.scale(3))
30
31 .. manim:: ThreeDParametricSpring
32 :save_last_frame:
33
34 class ThreeDParametricSpring(ThreeDScene):
35 def construct(self):
36 curve1 = ParametricFunction(
37 lambda u: np.array([
38 1.2 * np.cos(u),
39 1.2 * np.sin(u),
40 u * 0.05
41 ]), color=RED, t_min=-3 * TAU, t_max=5 * TAU,
42 ).set_shade_in_3d(True)
43 axes = ThreeDAxes()
44 self.add(axes, curve1)
45 self.set_camera_orientation(phi=80 * DEGREES, theta=-60 * DEGREES)
46 self.wait()
47 """
48
49 def __init__(
50 self,
51 function=None,
52 t_range=None,
53 dt=1e-8,
54 discontinuities=None,
55 use_smoothing=True,
56 **kwargs
57 ):
58 self.function = function
59 t_range = np.array([0, 1, 0.01]) if t_range is None else t_range
60 if len(t_range) == 2:
61 t_range = [*t_range, 0.01]
62
63 self.dt = dt
64 self.discontinuities = [] if discontinuities is None else discontinuities
65 self.use_smoothing = use_smoothing
66 self.t_min, self.t_max, self.t_step = t_range
67
68 VMobject.__init__(self, **kwargs)
69
70 def get_function(self):
71 return self.function
72
73 def get_point_from_function(self, t):
74 return self.function(t)
75
76 def generate_points(self):
77
78 discontinuities = filter(
79 lambda t: self.t_min <= t <= self.t_max, self.discontinuities
80 )
81 discontinuities = np.array(list(discontinuities))
82 boundary_times = [
83 self.t_min,
84 self.t_max,
85 *(discontinuities - self.dt),
86 *(discontinuities + self.dt),
87 ]
88 boundary_times.sort()
89 for t1, t2 in zip(boundary_times[0::2], boundary_times[1::2]):
90 t_range = [*np.arange(t1, t2, self.t_step), t2]
91 points = np.array([self.function(t) for t in t_range])
92 self.start_new_path(points[0])
93 self.add_points_as_corners(points[1:])
94 if self.use_smoothing:
95 # TODO: not in line with upstream, approx_smooth does not exist
96 self.make_smooth()
97 return self
98
99
100 class FunctionGraph(ParametricFunction):
101 def __init__(self, function, x_range=None, color=YELLOW, **kwargs):
102
103 if x_range is None:
104 x_range = np.array([-config["frame_x_radius"], config["frame_x_radius"]])
105
106 self.x_range = x_range
107 self.parametric_function = lambda t: np.array([t, function(t), 0])
108 self.function = function
109 super().__init__(self.parametric_function, self.x_range, color=color, **kwargs)
110
111 def get_function(self):
112 return self.function
113
114 def get_point_from_function(self, x):
115 return self.parametric_function(x)
116
[end of manim/mobject/functions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/manim/mobject/functions.py b/manim/mobject/functions.py
--- a/manim/mobject/functions.py
+++ b/manim/mobject/functions.py
@@ -25,7 +25,7 @@
return np.array((np.sin(2 * t), np.sin(3 * t), 0))
def construct(self):
- func = ParametricFunction(self.func, t_max = TAU, fill_opacity=0).set_color(RED)
+ func = ParametricFunction(self.func, t_range = np.array([0, TAU]), fill_opacity=0).set_color(RED)
self.add(func.scale(3))
.. manim:: ThreeDParametricSpring
@@ -38,7 +38,7 @@
1.2 * np.cos(u),
1.2 * np.sin(u),
u * 0.05
- ]), color=RED, t_min=-3 * TAU, t_max=5 * TAU,
+ ]), color=RED, t_range = np.array([-3*TAU, 5*TAU, 0.01])
).set_shade_in_3d(True)
axes = ThreeDAxes()
self.add(axes, curve1)
| {"golden_diff": "diff --git a/manim/mobject/functions.py b/manim/mobject/functions.py\n--- a/manim/mobject/functions.py\n+++ b/manim/mobject/functions.py\n@@ -25,7 +25,7 @@\n return np.array((np.sin(2 * t), np.sin(3 * t), 0))\n \n def construct(self):\n- func = ParametricFunction(self.func, t_max = TAU, fill_opacity=0).set_color(RED)\n+ func = ParametricFunction(self.func, t_range = np.array([0, TAU]), fill_opacity=0).set_color(RED)\n self.add(func.scale(3))\n \n .. manim:: ThreeDParametricSpring\n@@ -38,7 +38,7 @@\n 1.2 * np.cos(u),\n 1.2 * np.sin(u),\n u * 0.05\n- ]), color=RED, t_min=-3 * TAU, t_max=5 * TAU,\n+ ]), color=RED, t_range = np.array([-3*TAU, 5*TAU, 0.01])\n ).set_shade_in_3d(True)\n axes = ThreeDAxes()\n self.add(axes, curve1)\n", "issue": "Remove references to t_min and t_max in ParametricFunction\n## Description of bug / unexpected behavior\r\n\r\nWith PR #1161 the old way of setting the parameter's range (e.g. by setting `t_min` and `t_max`) was replaced by a single parameter `t_range`. However, the docs still show usage of `t_min` and `t_max`.\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"Mobjects representing function graphs.\"\"\"\n\n__all__ = [\"ParametricFunction\", \"FunctionGraph\"]\n\n\nimport numpy as np\n\nfrom .. import config\nfrom ..constants import *\nfrom ..mobject.types.vectorized_mobject import VMobject\nfrom ..utils.color import YELLOW\n\n\nclass ParametricFunction(VMobject):\n \"\"\"A parametric curve.\n\n Examples\n --------\n\n .. manim:: PlotParametricFunction\n :save_last_frame:\n\n class PlotParametricFunction(Scene):\n def func(self, t):\n return np.array((np.sin(2 * t), np.sin(3 * t), 0))\n\n def construct(self):\n func = ParametricFunction(self.func, t_max = TAU, fill_opacity=0).set_color(RED)\n self.add(func.scale(3))\n\n .. 
manim:: ThreeDParametricSpring\n :save_last_frame:\n\n class ThreeDParametricSpring(ThreeDScene):\n def construct(self):\n curve1 = ParametricFunction(\n lambda u: np.array([\n 1.2 * np.cos(u),\n 1.2 * np.sin(u),\n u * 0.05\n ]), color=RED, t_min=-3 * TAU, t_max=5 * TAU,\n ).set_shade_in_3d(True)\n axes = ThreeDAxes()\n self.add(axes, curve1)\n self.set_camera_orientation(phi=80 * DEGREES, theta=-60 * DEGREES)\n self.wait()\n \"\"\"\n\n def __init__(\n self,\n function=None,\n t_range=None,\n dt=1e-8,\n discontinuities=None,\n use_smoothing=True,\n **kwargs\n ):\n self.function = function\n t_range = np.array([0, 1, 0.01]) if t_range is None else t_range\n if len(t_range) == 2:\n t_range = [*t_range, 0.01]\n\n self.dt = dt\n self.discontinuities = [] if discontinuities is None else discontinuities\n self.use_smoothing = use_smoothing\n self.t_min, self.t_max, self.t_step = t_range\n\n VMobject.__init__(self, **kwargs)\n\n def get_function(self):\n return self.function\n\n def get_point_from_function(self, t):\n return self.function(t)\n\n def generate_points(self):\n\n discontinuities = filter(\n lambda t: self.t_min <= t <= self.t_max, self.discontinuities\n )\n discontinuities = np.array(list(discontinuities))\n boundary_times = [\n self.t_min,\n self.t_max,\n *(discontinuities - self.dt),\n *(discontinuities + self.dt),\n ]\n boundary_times.sort()\n for t1, t2 in zip(boundary_times[0::2], boundary_times[1::2]):\n t_range = [*np.arange(t1, t2, self.t_step), t2]\n points = np.array([self.function(t) for t in t_range])\n self.start_new_path(points[0])\n self.add_points_as_corners(points[1:])\n if self.use_smoothing:\n # TODO: not in line with upstream, approx_smooth does not exist\n self.make_smooth()\n return self\n\n\nclass FunctionGraph(ParametricFunction):\n def __init__(self, function, x_range=None, color=YELLOW, **kwargs):\n\n if x_range is None:\n x_range = np.array([-config[\"frame_x_radius\"], config[\"frame_x_radius\"]])\n\n self.x_range = x_range\n self.parametric_function = lambda t: np.array([t, function(t), 0])\n self.function = function\n super().__init__(self.parametric_function, self.x_range, color=color, **kwargs)\n\n def get_function(self):\n return self.function\n\n def get_point_from_function(self, x):\n return self.parametric_function(x)\n", "path": "manim/mobject/functions.py"}]} | 1,735 | 276 |
gh_patches_debug_31390 | rasdani/github-patches | git_diff | Cog-Creators__Red-DiscordBot-5388 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Mod] Mention spam accounting for roles
# Feature request
After finalizing https://github.com/Cog-Creators/Red-DiscordBot/pull/4359 (allowing for duplicate mentions). A small discussion was brought up (I forgot with whom), to possibly make something that would / could account for role mentions as well.
#### Select the type of feature you are requesting:
<!-- To check a box, replace the space between the [] with a x -->
- [ ] Cog
- [x] Command
- [ ] API functionality
#### Describe your requested feature
This could either be it's own count system (separate from total user mentions. So user mentions could be set to 7, while role mentions could be set to 3), or it would take both user mentions and role mentions into the same context. (each would account for 7 in this example).
I wanted to make this issue to gather feedback, I am more than willing to work on this myself, though, I want to make 100% sure that this is something worth working on, getting proper feedback on how to set this up and how it should action.
The reason why I believe this should be something for Core Mod is because while it can 100% be used as a third party cog, I figured this would give us and any user who uses core mod better advantages and control over antimentionspam we offer.
</issue>
<code>
[start of redbot/cogs/mod/events.py]
1 import logging
2 from datetime import timezone
3 from collections import defaultdict, deque
4
5 import discord
6 from redbot.core import i18n, modlog, commands
7 from redbot.core.utils.mod import is_mod_or_superior
8 from .abc import MixinMeta
9
10 _ = i18n.Translator("Mod", __file__)
11 log = logging.getLogger("red.mod")
12
13
14 class Events(MixinMeta):
15 """
16 This is a mixin for the core mod cog
17 Has a bunch of things split off to here.
18 """
19
20 async def check_duplicates(self, message):
21 guild = message.guild
22 author = message.author
23
24 guild_cache = self.cache.get(guild.id, None)
25 if guild_cache is None:
26 repeats = await self.config.guild(guild).delete_repeats()
27 if repeats == -1:
28 return False
29 guild_cache = self.cache[guild.id] = defaultdict(lambda: deque(maxlen=repeats))
30
31 if not message.content:
32 return False
33
34 guild_cache[author].append(message.content)
35 msgs = guild_cache[author]
36 if len(msgs) == msgs.maxlen and len(set(msgs)) == 1:
37 try:
38 await message.delete()
39 return True
40 except discord.HTTPException:
41 pass
42 return False
43
44 async def check_mention_spam(self, message):
45 guild, author = message.guild, message.author
46 mention_spam = await self.config.guild(guild).mention_spam.all()
47
48 if mention_spam["strict"]: # if strict is enabled
49 mentions = message.raw_mentions
50 else: # if not enabled
51 mentions = set(message.mentions)
52
53 if mention_spam["ban"]:
54 if len(mentions) >= mention_spam["ban"]:
55 try:
56 await guild.ban(author, reason=_("Mention spam (Autoban)"))
57 except discord.HTTPException:
58 log.warning(
59 "Failed to ban a member ({member}) for mention spam in server {guild}.".format(
60 member=author.id, guild=guild.id
61 )
62 )
63 else:
64 await modlog.create_case(
65 self.bot,
66 guild,
67 message.created_at,
68 "ban",
69 author,
70 guild.me,
71 _("Mention spam (Autoban)"),
72 until=None,
73 channel=None,
74 )
75 return True
76
77 if mention_spam["kick"]:
78 if len(mentions) >= mention_spam["kick"]:
79 try:
80 await guild.kick(author, reason=_("Mention Spam (Autokick)"))
81 except discord.HTTPException:
82 log.warning(
83 "Failed to kick a member ({member}) for mention spam in server {guild}".format(
84 member=author.id, guild=guild.id
85 )
86 )
87 else:
88 await modlog.create_case(
89 self.bot,
90 guild,
91 message.created_at,
92 "kick",
93 author,
94 guild.me,
95 _("Mention spam (Autokick)"),
96 until=None,
97 channel=None,
98 )
99 return True
100
101 if mention_spam["warn"]:
102 if len(mentions) >= mention_spam["warn"]:
103 try:
104 await author.send(_("Please do not mass mention people!"))
105 except (discord.HTTPException, discord.Forbidden):
106 try:
107 await message.channel.send(
108 _("{member}, Please do not mass mention people!").format(
109 member=author.mention
110 )
111 )
112 except (discord.HTTPException, discord.Forbidden):
113 log.warning(
114 "Failed to warn a member ({member}) for mention spam in server {guild}".format(
115 member=author.id, guild=guild.id
116 )
117 )
118 return False
119
120 await modlog.create_case(
121 self.bot,
122 guild,
123 message.created_at,
124 "warning",
125 author,
126 guild.me,
127 _("Mention spam (Autowarn)"),
128 until=None,
129 channel=None,
130 )
131 return True
132 return False
133
134 @commands.Cog.listener()
135 async def on_message(self, message):
136 author = message.author
137 if message.guild is None or self.bot.user == author:
138 return
139
140 if await self.bot.cog_disabled_in_guild(self, message.guild):
141 return
142
143 valid_user = isinstance(author, discord.Member) and not author.bot
144 if not valid_user:
145 return
146
147 # Bots and mods or superior are ignored from the filter
148 mod_or_superior = await is_mod_or_superior(self.bot, obj=author)
149 if mod_or_superior:
150 return
151 # As are anyone configured to be
152 if await self.bot.is_automod_immune(message):
153 return
154
155 await i18n.set_contextual_locales_from_guild(self.bot, message.guild)
156
157 deleted = await self.check_duplicates(message)
158 if not deleted:
159 await self.check_mention_spam(message)
160
161 @commands.Cog.listener()
162 async def on_user_update(self, before: discord.User, after: discord.User):
163 if before.name != after.name:
164 track_all_names = await self.config.track_all_names()
165 if not track_all_names:
166 return
167 async with self.config.user(before).past_names() as name_list:
168 while None in name_list: # clean out null entries from a bug
169 name_list.remove(None)
170 if before.name in name_list:
171 # Ensure order is maintained without duplicates occurring
172 name_list.remove(before.name)
173 name_list.append(before.name)
174 while len(name_list) > 20:
175 name_list.pop(0)
176
177 @commands.Cog.listener()
178 async def on_member_update(self, before: discord.Member, after: discord.Member):
179 if before.nick != after.nick and before.nick is not None:
180 guild = after.guild
181 if (not guild) or await self.bot.cog_disabled_in_guild(self, guild):
182 return
183 track_all_names = await self.config.track_all_names()
184 track_nicknames = await self.config.guild(guild).track_nicknames()
185 if (not track_all_names) or (not track_nicknames):
186 return
187 async with self.config.member(before).past_nicks() as nick_list:
188 while None in nick_list: # clean out null entries from a bug
189 nick_list.remove(None)
190 if before.nick in nick_list:
191 nick_list.remove(before.nick)
192 nick_list.append(before.nick)
193 while len(nick_list) > 20:
194 nick_list.pop(0)
195
[end of redbot/cogs/mod/events.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/redbot/cogs/mod/events.py b/redbot/cogs/mod/events.py
--- a/redbot/cogs/mod/events.py
+++ b/redbot/cogs/mod/events.py
@@ -46,12 +46,12 @@
mention_spam = await self.config.guild(guild).mention_spam.all()
if mention_spam["strict"]: # if strict is enabled
- mentions = message.raw_mentions
+ mentions = len(message.raw_mentions) + len(message.raw_role_mentions)
else: # if not enabled
- mentions = set(message.mentions)
+ mentions = len(set(message.mentions)) + len(set(message.role_mentions))
if mention_spam["ban"]:
- if len(mentions) >= mention_spam["ban"]:
+ if mentions >= mention_spam["ban"]:
try:
await guild.ban(author, reason=_("Mention spam (Autoban)"))
except discord.HTTPException:
@@ -75,7 +75,7 @@
return True
if mention_spam["kick"]:
- if len(mentions) >= mention_spam["kick"]:
+ if mentions >= mention_spam["kick"]:
try:
await guild.kick(author, reason=_("Mention Spam (Autokick)"))
except discord.HTTPException:
@@ -99,7 +99,7 @@
return True
if mention_spam["warn"]:
- if len(mentions) >= mention_spam["warn"]:
+ if mentions >= mention_spam["warn"]:
try:
await author.send(_("Please do not mass mention people!"))
except (discord.HTTPException, discord.Forbidden):
| {"golden_diff": "diff --git a/redbot/cogs/mod/events.py b/redbot/cogs/mod/events.py\n--- a/redbot/cogs/mod/events.py\n+++ b/redbot/cogs/mod/events.py\n@@ -46,12 +46,12 @@\n mention_spam = await self.config.guild(guild).mention_spam.all()\n \n if mention_spam[\"strict\"]: # if strict is enabled\n- mentions = message.raw_mentions\n+ mentions = len(message.raw_mentions) + len(message.raw_role_mentions)\n else: # if not enabled\n- mentions = set(message.mentions)\n+ mentions = len(set(message.mentions)) + len(set(message.role_mentions))\n \n if mention_spam[\"ban\"]:\n- if len(mentions) >= mention_spam[\"ban\"]:\n+ if mentions >= mention_spam[\"ban\"]:\n try:\n await guild.ban(author, reason=_(\"Mention spam (Autoban)\"))\n except discord.HTTPException:\n@@ -75,7 +75,7 @@\n return True\n \n if mention_spam[\"kick\"]:\n- if len(mentions) >= mention_spam[\"kick\"]:\n+ if mentions >= mention_spam[\"kick\"]:\n try:\n await guild.kick(author, reason=_(\"Mention Spam (Autokick)\"))\n except discord.HTTPException:\n@@ -99,7 +99,7 @@\n return True\n \n if mention_spam[\"warn\"]:\n- if len(mentions) >= mention_spam[\"warn\"]:\n+ if mentions >= mention_spam[\"warn\"]:\n try:\n await author.send(_(\"Please do not mass mention people!\"))\n except (discord.HTTPException, discord.Forbidden):\n", "issue": "[Mod] Mention spam accounting for roles\n# Feature request\r\n\r\nAfter finalizing https://github.com/Cog-Creators/Red-DiscordBot/pull/4359 (allowing for duplicate mentions). A small discussion was brought up (I forgot with whom), to possibly make something that would / could account for role mentions as well. \r\n\r\n#### Select the type of feature you are requesting:\r\n\r\n<!-- To check a box, replace the space between the [] with a x -->\r\n\r\n- [ ] Cog\r\n- [x] Command\r\n- [ ] API functionality\r\n\r\n#### Describe your requested feature\r\n\r\nThis could either be it's own count system (separate from total user mentions. So user mentions could be set to 7, while role mentions could be set to 3), or it would take both user mentions and role mentions into the same context. (each would account for 7 in this example). 
\r\n\r\nI wanted to make this issue to gather feedback, I am more than willing to work on this myself, though, I want to make 100% sure that this is something worth working on, getting proper feedback on how to set this up and how it should action.\r\n\r\nThe reason why I believe this should be something for Core Mod is because while it can 100% be used as a third party cog, I figured this would give us and any user who uses core mod better advantages and control over antimentionspam we offer.\r\n\n", "before_files": [{"content": "import logging\nfrom datetime import timezone\nfrom collections import defaultdict, deque\n\nimport discord\nfrom redbot.core import i18n, modlog, commands\nfrom redbot.core.utils.mod import is_mod_or_superior\nfrom .abc import MixinMeta\n\n_ = i18n.Translator(\"Mod\", __file__)\nlog = logging.getLogger(\"red.mod\")\n\n\nclass Events(MixinMeta):\n \"\"\"\n This is a mixin for the core mod cog\n Has a bunch of things split off to here.\n \"\"\"\n\n async def check_duplicates(self, message):\n guild = message.guild\n author = message.author\n\n guild_cache = self.cache.get(guild.id, None)\n if guild_cache is None:\n repeats = await self.config.guild(guild).delete_repeats()\n if repeats == -1:\n return False\n guild_cache = self.cache[guild.id] = defaultdict(lambda: deque(maxlen=repeats))\n\n if not message.content:\n return False\n\n guild_cache[author].append(message.content)\n msgs = guild_cache[author]\n if len(msgs) == msgs.maxlen and len(set(msgs)) == 1:\n try:\n await message.delete()\n return True\n except discord.HTTPException:\n pass\n return False\n\n async def check_mention_spam(self, message):\n guild, author = message.guild, message.author\n mention_spam = await self.config.guild(guild).mention_spam.all()\n\n if mention_spam[\"strict\"]: # if strict is enabled\n mentions = message.raw_mentions\n else: # if not enabled\n mentions = set(message.mentions)\n\n if mention_spam[\"ban\"]:\n if len(mentions) >= mention_spam[\"ban\"]:\n try:\n await guild.ban(author, reason=_(\"Mention spam (Autoban)\"))\n except discord.HTTPException:\n log.warning(\n \"Failed to ban a member ({member}) for mention spam in server {guild}.\".format(\n member=author.id, guild=guild.id\n )\n )\n else:\n await modlog.create_case(\n self.bot,\n guild,\n message.created_at,\n \"ban\",\n author,\n guild.me,\n _(\"Mention spam (Autoban)\"),\n until=None,\n channel=None,\n )\n return True\n\n if mention_spam[\"kick\"]:\n if len(mentions) >= mention_spam[\"kick\"]:\n try:\n await guild.kick(author, reason=_(\"Mention Spam (Autokick)\"))\n except discord.HTTPException:\n log.warning(\n \"Failed to kick a member ({member}) for mention spam in server {guild}\".format(\n member=author.id, guild=guild.id\n )\n )\n else:\n await modlog.create_case(\n self.bot,\n guild,\n message.created_at,\n \"kick\",\n author,\n guild.me,\n _(\"Mention spam (Autokick)\"),\n until=None,\n channel=None,\n )\n return True\n\n if mention_spam[\"warn\"]:\n if len(mentions) >= mention_spam[\"warn\"]:\n try:\n await author.send(_(\"Please do not mass mention people!\"))\n except (discord.HTTPException, discord.Forbidden):\n try:\n await message.channel.send(\n _(\"{member}, Please do not mass mention people!\").format(\n member=author.mention\n )\n )\n except (discord.HTTPException, discord.Forbidden):\n log.warning(\n \"Failed to warn a member ({member}) for mention spam in server {guild}\".format(\n member=author.id, guild=guild.id\n )\n )\n return False\n\n await modlog.create_case(\n self.bot,\n guild,\n 
message.created_at,\n \"warning\",\n author,\n guild.me,\n _(\"Mention spam (Autowarn)\"),\n until=None,\n channel=None,\n )\n return True\n return False\n\n @commands.Cog.listener()\n async def on_message(self, message):\n author = message.author\n if message.guild is None or self.bot.user == author:\n return\n\n if await self.bot.cog_disabled_in_guild(self, message.guild):\n return\n\n valid_user = isinstance(author, discord.Member) and not author.bot\n if not valid_user:\n return\n\n # Bots and mods or superior are ignored from the filter\n mod_or_superior = await is_mod_or_superior(self.bot, obj=author)\n if mod_or_superior:\n return\n # As are anyone configured to be\n if await self.bot.is_automod_immune(message):\n return\n\n await i18n.set_contextual_locales_from_guild(self.bot, message.guild)\n\n deleted = await self.check_duplicates(message)\n if not deleted:\n await self.check_mention_spam(message)\n\n @commands.Cog.listener()\n async def on_user_update(self, before: discord.User, after: discord.User):\n if before.name != after.name:\n track_all_names = await self.config.track_all_names()\n if not track_all_names:\n return\n async with self.config.user(before).past_names() as name_list:\n while None in name_list: # clean out null entries from a bug\n name_list.remove(None)\n if before.name in name_list:\n # Ensure order is maintained without duplicates occurring\n name_list.remove(before.name)\n name_list.append(before.name)\n while len(name_list) > 20:\n name_list.pop(0)\n\n @commands.Cog.listener()\n async def on_member_update(self, before: discord.Member, after: discord.Member):\n if before.nick != after.nick and before.nick is not None:\n guild = after.guild\n if (not guild) or await self.bot.cog_disabled_in_guild(self, guild):\n return\n track_all_names = await self.config.track_all_names()\n track_nicknames = await self.config.guild(guild).track_nicknames()\n if (not track_all_names) or (not track_nicknames):\n return\n async with self.config.member(before).past_nicks() as nick_list:\n while None in nick_list: # clean out null entries from a bug\n nick_list.remove(None)\n if before.nick in nick_list:\n nick_list.remove(before.nick)\n nick_list.append(before.nick)\n while len(nick_list) > 20:\n nick_list.pop(0)\n", "path": "redbot/cogs/mod/events.py"}]} | 2,703 | 358 |
gh_patches_debug_3330 | rasdani/github-patches | git_diff | LibraryOfCongress__concordia-240 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Enable Adobe DTM for Analytics (closes #160)
This embeds the code but Adobe’s instructions violate web
performance guidelines and we should review this carefully
to see how much Adobe is affecting site performance.
</issue>
<code>
[start of concordia/context_processors.py]
1 from django.conf import settings
2
3
4 def system_configuration(request):
5 """
6 Expose some system configuration to the default template context
7 """
8
9 return {"SENTRY_PUBLIC_DSN": getattr(settings, "SENTRY_PUBLIC_DSN", None)}
10
11
12 def site_navigation(request):
13 data = {"VIEW_NAME": request.resolver_match.view_name}
14
15 data["VIEW_NAME_FOR_CSS"] = data["VIEW_NAME"].replace(":", "--")
16
17 path_components = request.path.strip("/").split("/")
18 for i, component in enumerate(path_components, start=1):
19 data["PATH_LEVEL_%d" % i] = component
20
21 return data
22
[end of concordia/context_processors.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/concordia/context_processors.py b/concordia/context_processors.py
--- a/concordia/context_processors.py
+++ b/concordia/context_processors.py
@@ -6,7 +6,10 @@
Expose some system configuration to the default template context
"""
- return {"SENTRY_PUBLIC_DSN": getattr(settings, "SENTRY_PUBLIC_DSN", None)}
+ return {
+ "SENTRY_PUBLIC_DSN": getattr(settings, "SENTRY_PUBLIC_DSN", None),
+ "CONCORDIA_ENVIRONMENT": settings.CONCORDIA_ENVIRONMENT,
+ }
def site_navigation(request):
| {"golden_diff": "diff --git a/concordia/context_processors.py b/concordia/context_processors.py\n--- a/concordia/context_processors.py\n+++ b/concordia/context_processors.py\n@@ -6,7 +6,10 @@\n Expose some system configuration to the default template context\n \"\"\"\n \n- return {\"SENTRY_PUBLIC_DSN\": getattr(settings, \"SENTRY_PUBLIC_DSN\", None)}\n+ return {\n+ \"SENTRY_PUBLIC_DSN\": getattr(settings, \"SENTRY_PUBLIC_DSN\", None),\n+ \"CONCORDIA_ENVIRONMENT\": settings.CONCORDIA_ENVIRONMENT,\n+ }\n \n \n def site_navigation(request):\n", "issue": "Enable Adobe DTM for Analytics (closes #160)\nThis embeds the code but Adobe\u2019s instructions violate web\r\nperformance guidelines and we should review this carefully\r\nto see how much Adobe is affecting site performance.\n", "before_files": [{"content": "from django.conf import settings\n\n\ndef system_configuration(request):\n \"\"\"\n Expose some system configuration to the default template context\n \"\"\"\n\n return {\"SENTRY_PUBLIC_DSN\": getattr(settings, \"SENTRY_PUBLIC_DSN\", None)}\n\n\ndef site_navigation(request):\n data = {\"VIEW_NAME\": request.resolver_match.view_name}\n\n data[\"VIEW_NAME_FOR_CSS\"] = data[\"VIEW_NAME\"].replace(\":\", \"--\")\n\n path_components = request.path.strip(\"/\").split(\"/\")\n for i, component in enumerate(path_components, start=1):\n data[\"PATH_LEVEL_%d\" % i] = component\n\n return data\n", "path": "concordia/context_processors.py"}]} | 754 | 134 |
gh_patches_debug_26332 | rasdani/github-patches | git_diff | jupyter__docker-stacks-388 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Consider not writing in user home
All the images (starting from base notebook) write part of the configuration inside the user home folder, and assume that `/home/$NB_USER/work` will be mounted. This has a practical limitation that it is not easy to setup a hub such that useful folders like `.ssh`, or e.g. `.bash_history` persist across container restarts.
I might be missing underlying assumptions and use cases, but I suggest to assume that home itself would be mounted, and instead modify the global configuration options.
</issue>
<code>
[start of scipy-notebook/mplimporthook.py]
1 """Startup script for IPython kernel.
2
3 Installs an import hook to configure the matplotlib backend on the fly.
4
5 Originally from @minrk at
6 https://github.com/minrk/profile_default/blob/master/startup/mplimporthook.py
7 Repurposed for docker-stacks to address repeat bugs like
8 https://github.com/jupyter/docker-stacks/issues/235.
9 """
10 import sys
11 from IPython import get_ipython
12
13 class MatplotlibFinder(object):
14 """Import hook that notices when matplotlib.pyplot or pylab is imported
15 and tries to configure the matplotlib backend appropriately for the
16 environment.
17 """
18 _called = False
19
20 def find_module(self, fullname, path=None):
21 if self._called:
22 # already handled
23 return
24
25 if fullname not in ('pylab', 'matplotlib.pyplot'):
26 # not matplotlib
27 return
28
29 # don't call me again
30 self._called = True
31
32 try:
33 # remove myself from the import hooks
34 sys.meta_path = [loader for loader in sys.meta_path if loader is not self]
35 except ValueError:
36 pass
37
38 ip = get_ipython()
39 if ip is None:
40 # not in an interactive environment
41 return
42
43 if ip.pylab_gui_select:
44 # backend already selected
45 return
46
47 if hasattr(ip, 'kernel'):
48 # default to inline in kernel environments
49 ip.enable_matplotlib('inline')
50 else:
51 print('enabling matplotlib')
52 ip.enable_matplotlib()
53
54 # install the finder immediately
55 sys.meta_path.insert(0, MatplotlibFinder())
[end of scipy-notebook/mplimporthook.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scipy-notebook/mplimporthook.py b/scipy-notebook/mplimporthook.py
deleted file mode 100644
--- a/scipy-notebook/mplimporthook.py
+++ /dev/null
@@ -1,55 +0,0 @@
-"""Startup script for IPython kernel.
-
-Installs an import hook to configure the matplotlib backend on the fly.
-
-Originally from @minrk at
-https://github.com/minrk/profile_default/blob/master/startup/mplimporthook.py
-Repurposed for docker-stacks to address repeat bugs like
-https://github.com/jupyter/docker-stacks/issues/235.
-"""
-import sys
-from IPython import get_ipython
-
-class MatplotlibFinder(object):
- """Import hook that notices when matplotlib.pyplot or pylab is imported
- and tries to configure the matplotlib backend appropriately for the
- environment.
- """
- _called = False
-
- def find_module(self, fullname, path=None):
- if self._called:
- # already handled
- return
-
- if fullname not in ('pylab', 'matplotlib.pyplot'):
- # not matplotlib
- return
-
- # don't call me again
- self._called = True
-
- try:
- # remove myself from the import hooks
- sys.meta_path = [loader for loader in sys.meta_path if loader is not self]
- except ValueError:
- pass
-
- ip = get_ipython()
- if ip is None:
- # not in an interactive environment
- return
-
- if ip.pylab_gui_select:
- # backend already selected
- return
-
- if hasattr(ip, 'kernel'):
- # default to inline in kernel environments
- ip.enable_matplotlib('inline')
- else:
- print('enabling matplotlib')
- ip.enable_matplotlib()
-
-# install the finder immediately
-sys.meta_path.insert(0, MatplotlibFinder())
\ No newline at end of file
| {"golden_diff": "diff --git a/scipy-notebook/mplimporthook.py b/scipy-notebook/mplimporthook.py\ndeleted file mode 100644\n--- a/scipy-notebook/mplimporthook.py\n+++ /dev/null\n@@ -1,55 +0,0 @@\n-\"\"\"Startup script for IPython kernel.\n-\n-Installs an import hook to configure the matplotlib backend on the fly.\n-\n-Originally from @minrk at \n-https://github.com/minrk/profile_default/blob/master/startup/mplimporthook.py\n-Repurposed for docker-stacks to address repeat bugs like\n-https://github.com/jupyter/docker-stacks/issues/235.\n-\"\"\"\n-import sys\n-from IPython import get_ipython\n-\n-class MatplotlibFinder(object):\n- \"\"\"Import hook that notices when matplotlib.pyplot or pylab is imported\n- and tries to configure the matplotlib backend appropriately for the\n- environment.\n- \"\"\"\n- _called = False\n- \n- def find_module(self, fullname, path=None):\n- if self._called:\n- # already handled\n- return\n- \n- if fullname not in ('pylab', 'matplotlib.pyplot'):\n- # not matplotlib\n- return\n- \n- # don't call me again\n- self._called = True\n- \n- try:\n- # remove myself from the import hooks\n- sys.meta_path = [loader for loader in sys.meta_path if loader is not self]\n- except ValueError:\n- pass\n- \n- ip = get_ipython()\n- if ip is None:\n- # not in an interactive environment\n- return\n- \n- if ip.pylab_gui_select:\n- # backend already selected\n- return\n- \n- if hasattr(ip, 'kernel'):\n- # default to inline in kernel environments\n- ip.enable_matplotlib('inline')\n- else:\n- print('enabling matplotlib')\n- ip.enable_matplotlib()\n-\n-# install the finder immediately\n-sys.meta_path.insert(0, MatplotlibFinder())\n\\ No newline at end of file\n", "issue": "Consider not writing in user home\nAll the images (starting from base notebook) write part of the configuration inside the user home folder, and assume that `/home/$NB_USER/work` will be mounted. This has a practical limitation that it is not easy to setup a hub such that useful folders like `.ssh`, or e.g. 
`.bash_history` persist across container restarts.\r\n\r\nI might be missing underlying assumptions and use cases, but I suggest to assume that home itself would be mounted, and instead modify the global configuration options.\n", "before_files": [{"content": "\"\"\"Startup script for IPython kernel.\n\nInstalls an import hook to configure the matplotlib backend on the fly.\n\nOriginally from @minrk at \nhttps://github.com/minrk/profile_default/blob/master/startup/mplimporthook.py\nRepurposed for docker-stacks to address repeat bugs like\nhttps://github.com/jupyter/docker-stacks/issues/235.\n\"\"\"\nimport sys\nfrom IPython import get_ipython\n\nclass MatplotlibFinder(object):\n \"\"\"Import hook that notices when matplotlib.pyplot or pylab is imported\n and tries to configure the matplotlib backend appropriately for the\n environment.\n \"\"\"\n _called = False\n \n def find_module(self, fullname, path=None):\n if self._called:\n # already handled\n return\n \n if fullname not in ('pylab', 'matplotlib.pyplot'):\n # not matplotlib\n return\n \n # don't call me again\n self._called = True\n \n try:\n # remove myself from the import hooks\n sys.meta_path = [loader for loader in sys.meta_path if loader is not self]\n except ValueError:\n pass\n \n ip = get_ipython()\n if ip is None:\n # not in an interactive environment\n return\n \n if ip.pylab_gui_select:\n # backend already selected\n return\n \n if hasattr(ip, 'kernel'):\n # default to inline in kernel environments\n ip.enable_matplotlib('inline')\n else:\n print('enabling matplotlib')\n ip.enable_matplotlib()\n\n# install the finder immediately\nsys.meta_path.insert(0, MatplotlibFinder())", "path": "scipy-notebook/mplimporthook.py"}]} | 1,091 | 463 |
gh_patches_debug_14024 | rasdani/github-patches | git_diff | ivy-llc__ivy-16042 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
cosine_similarity
#15051
</issue>
<code>
[start of ivy/functional/frontends/paddle/nn/functional/common.py]
1 # local
2
[end of ivy/functional/frontends/paddle/nn/functional/common.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ivy/functional/frontends/paddle/nn/functional/common.py b/ivy/functional/frontends/paddle/nn/functional/common.py
--- a/ivy/functional/frontends/paddle/nn/functional/common.py
+++ b/ivy/functional/frontends/paddle/nn/functional/common.py
@@ -1 +1,25 @@
# local
+import ivy
+from ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back
+from ivy.func_wrapper import with_unsupported_dtypes
+
+
+@with_unsupported_dtypes({"2.0.1 and below": ("float16", "bfloat16")}, "torch")
+@to_ivy_arrays_and_back
+def cosine_similarity(x1, x2, *, axis=1, eps=1e-08):
+ if len(x1.shape) == len(x2.shape) and len(x2.shape) >= 2:
+ numerator = ivy.sum(x1 * x2, axis=axis)
+ x1_squared_norm = ivy.sum(ivy.square(x1), axis=axis)
+ x2_squared_norm = ivy.sum(ivy.square(x2), axis=axis)
+ else:
+ numerator = ivy.sum(x1 * x2)
+ x1_squared_norm = ivy.sum(ivy.square(x1))
+ x2_squared_norm = ivy.sum(ivy.square(x2))
+
+ x1_norm = ivy.sqrt(x1_squared_norm)
+ x2_norm = ivy.sqrt(x2_squared_norm)
+ norm_mm = x1_norm * x2_norm
+ denominator = ivy.maximum(norm_mm, eps)
+
+ cosine = numerator / denominator
+ return cosine
| {"golden_diff": "diff --git a/ivy/functional/frontends/paddle/nn/functional/common.py b/ivy/functional/frontends/paddle/nn/functional/common.py\n--- a/ivy/functional/frontends/paddle/nn/functional/common.py\n+++ b/ivy/functional/frontends/paddle/nn/functional/common.py\n@@ -1 +1,25 @@\n # local\n+import ivy\n+from ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back\n+from ivy.func_wrapper import with_unsupported_dtypes\n+\n+\n+@with_unsupported_dtypes({\"2.0.1 and below\": (\"float16\", \"bfloat16\")}, \"torch\")\n+@to_ivy_arrays_and_back\n+def cosine_similarity(x1, x2, *, axis=1, eps=1e-08):\n+ if len(x1.shape) == len(x2.shape) and len(x2.shape) >= 2:\n+ numerator = ivy.sum(x1 * x2, axis=axis)\n+ x1_squared_norm = ivy.sum(ivy.square(x1), axis=axis)\n+ x2_squared_norm = ivy.sum(ivy.square(x2), axis=axis)\n+ else:\n+ numerator = ivy.sum(x1 * x2)\n+ x1_squared_norm = ivy.sum(ivy.square(x1))\n+ x2_squared_norm = ivy.sum(ivy.square(x2))\n+\n+ x1_norm = ivy.sqrt(x1_squared_norm)\n+ x2_norm = ivy.sqrt(x2_squared_norm)\n+ norm_mm = x1_norm * x2_norm\n+ denominator = ivy.maximum(norm_mm, eps)\n+\n+ cosine = numerator / denominator\n+ return cosine\n", "issue": "cosine_similarity\n#15051 \n", "before_files": [{"content": "# local\n", "path": "ivy/functional/frontends/paddle/nn/functional/common.py"}]} | 569 | 380 |
gh_patches_debug_31672 | rasdani/github-patches | git_diff | python-discord__bot-1088 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Catch error when source command cannot retrieve lines or file
Sentry Issue: [BOT-7K](https://sentry.io/organizations/python-discord/issues/1816540252/?referrer=github_integration)
This happens if the target command is one which has been added via internal eval or some other dynamic method, so it's _really_ low priority.
I think it'd be most convenient to let the error propagate a bit and handle it here https://github.com/python-discord/bot/blob/3649c2a03dc158a25f2c3be98db8691f903a1953/bot/cogs/source.py#L59
```
OSError: could not get source code
(1 additional frame(s) were not displayed)
...
File "bot/cogs/source.py", line 59, in source_command
embed = await self.build_embed(source_item)
File "bot/cogs/source.py", line 102, in build_embed
url, location, first_line = self.get_source_link(source_object)
File "bot/cogs/source.py", line 84, in get_source_link
lines, first_line_no = inspect.getsourcelines(src)
File "inspect.py", line 967, in getsourcelines
lines, lnum = findsource(object)
File "inspect.py", line 798, in findsource
raise OSError('could not get source code')
```
</issue>
<code>
[start of bot/cogs/source.py]
1 import inspect
2 from pathlib import Path
3 from typing import Optional, Tuple, Union
4
5 from discord import Embed
6 from discord.ext import commands
7
8 from bot.bot import Bot
9 from bot.constants import URLs
10
11 SourceType = Union[commands.HelpCommand, commands.Command, commands.Cog, str, commands.ExtensionNotLoaded]
12
13
14 class SourceConverter(commands.Converter):
15 """Convert an argument into a help command, tag, command, or cog."""
16
17 async def convert(self, ctx: commands.Context, argument: str) -> SourceType:
18 """Convert argument into source object."""
19 if argument.lower().startswith("help"):
20 return ctx.bot.help_command
21
22 cog = ctx.bot.get_cog(argument)
23 if cog:
24 return cog
25
26 cmd = ctx.bot.get_command(argument)
27 if cmd:
28 return cmd
29
30 tags_cog = ctx.bot.get_cog("Tags")
31 show_tag = True
32
33 if not tags_cog:
34 show_tag = False
35 elif argument.lower() in tags_cog._cache:
36 return argument.lower()
37
38 raise commands.BadArgument(
39 f"Unable to convert `{argument}` to valid command{', tag,' if show_tag else ''} or Cog."
40 )
41
42
43 class BotSource(commands.Cog):
44 """Displays information about the bot's source code."""
45
46 def __init__(self, bot: Bot):
47 self.bot = bot
48
49 @commands.command(name="source", aliases=("src",))
50 async def source_command(self, ctx: commands.Context, *, source_item: SourceConverter = None) -> None:
51 """Display information and a GitHub link to the source code of a command, tag, or cog."""
52 if not source_item:
53 embed = Embed(title="Bot's GitHub Repository")
54 embed.add_field(name="Repository", value=f"[Go to GitHub]({URLs.github_bot_repo})")
55 embed.set_thumbnail(url="https://avatars1.githubusercontent.com/u/9919")
56 await ctx.send(embed=embed)
57 return
58
59 embed = await self.build_embed(source_item)
60 await ctx.send(embed=embed)
61
62 def get_source_link(self, source_item: SourceType) -> Tuple[str, str, Optional[int]]:
63 """Build GitHub link of source item, return this link, file location and first line number."""
64 if isinstance(source_item, commands.HelpCommand):
65 src = type(source_item)
66 filename = inspect.getsourcefile(src)
67 elif isinstance(source_item, commands.Command):
68 if source_item.cog_name == "Alias":
69 cmd_name = source_item.callback.__name__.replace("_alias", "")
70 cmd = self.bot.get_command(cmd_name.replace("_", " "))
71 src = cmd.callback.__code__
72 filename = src.co_filename
73 else:
74 src = source_item.callback.__code__
75 filename = src.co_filename
76 elif isinstance(source_item, str):
77 tags_cog = self.bot.get_cog("Tags")
78 filename = tags_cog._cache[source_item]["location"]
79 else:
80 src = type(source_item)
81 filename = inspect.getsourcefile(src)
82
83 if not isinstance(source_item, str):
84 lines, first_line_no = inspect.getsourcelines(src)
85 lines_extension = f"#L{first_line_no}-L{first_line_no+len(lines)-1}"
86 else:
87 first_line_no = None
88 lines_extension = ""
89
90 # Handle tag file location differently than others to avoid errors in some cases
91 if not first_line_no:
92 file_location = Path(filename).relative_to("/bot/")
93 else:
94 file_location = Path(filename).relative_to(Path.cwd()).as_posix()
95
96 url = f"{URLs.github_bot_repo}/blob/master/{file_location}{lines_extension}"
97
98 return url, file_location, first_line_no or None
99
100 async def build_embed(self, source_object: SourceType) -> Optional[Embed]:
101 """Build embed based on source object."""
102 url, location, first_line = self.get_source_link(source_object)
103
104 if isinstance(source_object, commands.HelpCommand):
105 title = "Help Command"
106 description = source_object.__doc__.splitlines()[1]
107 elif isinstance(source_object, commands.Command):
108 if source_object.cog_name == "Alias":
109 cmd_name = source_object.callback.__name__.replace("_alias", "")
110 cmd = self.bot.get_command(cmd_name.replace("_", " "))
111 description = cmd.short_doc
112 else:
113 description = source_object.short_doc
114
115 title = f"Command: {source_object.qualified_name}"
116 elif isinstance(source_object, str):
117 title = f"Tag: {source_object}"
118 description = ""
119 else:
120 title = f"Cog: {source_object.qualified_name}"
121 description = source_object.description.splitlines()[0]
122
123 embed = Embed(title=title, description=description)
124 embed.add_field(name="Source Code", value=f"[Go to GitHub]({url})")
125 line_text = f":{first_line}" if first_line else ""
126 embed.set_footer(text=f"{location}{line_text}")
127
128 return embed
129
130
131 def setup(bot: Bot) -> None:
132 """Load the BotSource cog."""
133 bot.add_cog(BotSource(bot))
134
[end of bot/cogs/source.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bot/cogs/source.py b/bot/cogs/source.py
--- a/bot/cogs/source.py
+++ b/bot/cogs/source.py
@@ -60,11 +60,12 @@
await ctx.send(embed=embed)
def get_source_link(self, source_item: SourceType) -> Tuple[str, str, Optional[int]]:
- """Build GitHub link of source item, return this link, file location and first line number."""
- if isinstance(source_item, commands.HelpCommand):
- src = type(source_item)
- filename = inspect.getsourcefile(src)
- elif isinstance(source_item, commands.Command):
+ """
+ Build GitHub link of source item, return this link, file location and first line number.
+
+ Raise BadArgument if `source_item` is a dynamically-created object (e.g. via internal eval).
+ """
+ if isinstance(source_item, commands.Command):
if source_item.cog_name == "Alias":
cmd_name = source_item.callback.__name__.replace("_alias", "")
cmd = self.bot.get_command(cmd_name.replace("_", " "))
@@ -78,10 +79,17 @@
filename = tags_cog._cache[source_item]["location"]
else:
src = type(source_item)
- filename = inspect.getsourcefile(src)
+ try:
+ filename = inspect.getsourcefile(src)
+ except TypeError:
+ raise commands.BadArgument("Cannot get source for a dynamically-created object.")
if not isinstance(source_item, str):
- lines, first_line_no = inspect.getsourcelines(src)
+ try:
+ lines, first_line_no = inspect.getsourcelines(src)
+ except OSError:
+ raise commands.BadArgument("Cannot get source for a dynamically-created object.")
+
lines_extension = f"#L{first_line_no}-L{first_line_no+len(lines)-1}"
else:
first_line_no = None
| {"golden_diff": "diff --git a/bot/cogs/source.py b/bot/cogs/source.py\n--- a/bot/cogs/source.py\n+++ b/bot/cogs/source.py\n@@ -60,11 +60,12 @@\n await ctx.send(embed=embed)\n \n def get_source_link(self, source_item: SourceType) -> Tuple[str, str, Optional[int]]:\n- \"\"\"Build GitHub link of source item, return this link, file location and first line number.\"\"\"\n- if isinstance(source_item, commands.HelpCommand):\n- src = type(source_item)\n- filename = inspect.getsourcefile(src)\n- elif isinstance(source_item, commands.Command):\n+ \"\"\"\n+ Build GitHub link of source item, return this link, file location and first line number.\n+\n+ Raise BadArgument if `source_item` is a dynamically-created object (e.g. via internal eval).\n+ \"\"\"\n+ if isinstance(source_item, commands.Command):\n if source_item.cog_name == \"Alias\":\n cmd_name = source_item.callback.__name__.replace(\"_alias\", \"\")\n cmd = self.bot.get_command(cmd_name.replace(\"_\", \" \"))\n@@ -78,10 +79,17 @@\n filename = tags_cog._cache[source_item][\"location\"]\n else:\n src = type(source_item)\n- filename = inspect.getsourcefile(src)\n+ try:\n+ filename = inspect.getsourcefile(src)\n+ except TypeError:\n+ raise commands.BadArgument(\"Cannot get source for a dynamically-created object.\")\n \n if not isinstance(source_item, str):\n- lines, first_line_no = inspect.getsourcelines(src)\n+ try:\n+ lines, first_line_no = inspect.getsourcelines(src)\n+ except OSError:\n+ raise commands.BadArgument(\"Cannot get source for a dynamically-created object.\")\n+\n lines_extension = f\"#L{first_line_no}-L{first_line_no+len(lines)-1}\"\n else:\n first_line_no = None\n", "issue": "Catch error when source command cannot retrieve lines or file\nSentry Issue: [BOT-7K](https://sentry.io/organizations/python-discord/issues/1816540252/?referrer=github_integration)\r\n\r\nThis happens if the target command is one which has been added via internal eval or some other dynamic method, so it's _really_ low priority.\r\n\r\nI think it'd be most convenient to let the error propagate a bit and handle it here https://github.com/python-discord/bot/blob/3649c2a03dc158a25f2c3be98db8691f903a1953/bot/cogs/source.py#L59\r\n\r\n```\r\nOSError: could not get source code\r\n(1 additional frame(s) were not displayed)\r\n...\r\n File \"bot/cogs/source.py\", line 59, in source_command\r\n embed = await self.build_embed(source_item)\r\n File \"bot/cogs/source.py\", line 102, in build_embed\r\n url, location, first_line = self.get_source_link(source_object)\r\n File \"bot/cogs/source.py\", line 84, in get_source_link\r\n lines, first_line_no = inspect.getsourcelines(src)\r\n File \"inspect.py\", line 967, in getsourcelines\r\n lines, lnum = findsource(object)\r\n File \"inspect.py\", line 798, in findsource\r\n raise OSError('could not get source code')\r\n```\n", "before_files": [{"content": "import inspect\nfrom pathlib import Path\nfrom typing import Optional, Tuple, Union\n\nfrom discord import Embed\nfrom discord.ext import commands\n\nfrom bot.bot import Bot\nfrom bot.constants import URLs\n\nSourceType = Union[commands.HelpCommand, commands.Command, commands.Cog, str, commands.ExtensionNotLoaded]\n\n\nclass SourceConverter(commands.Converter):\n \"\"\"Convert an argument into a help command, tag, command, or cog.\"\"\"\n\n async def convert(self, ctx: commands.Context, argument: str) -> SourceType:\n \"\"\"Convert argument into source object.\"\"\"\n if argument.lower().startswith(\"help\"):\n return ctx.bot.help_command\n\n cog = 
ctx.bot.get_cog(argument)\n if cog:\n return cog\n\n cmd = ctx.bot.get_command(argument)\n if cmd:\n return cmd\n\n tags_cog = ctx.bot.get_cog(\"Tags\")\n show_tag = True\n\n if not tags_cog:\n show_tag = False\n elif argument.lower() in tags_cog._cache:\n return argument.lower()\n\n raise commands.BadArgument(\n f\"Unable to convert `{argument}` to valid command{', tag,' if show_tag else ''} or Cog.\"\n )\n\n\nclass BotSource(commands.Cog):\n \"\"\"Displays information about the bot's source code.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n\n @commands.command(name=\"source\", aliases=(\"src\",))\n async def source_command(self, ctx: commands.Context, *, source_item: SourceConverter = None) -> None:\n \"\"\"Display information and a GitHub link to the source code of a command, tag, or cog.\"\"\"\n if not source_item:\n embed = Embed(title=\"Bot's GitHub Repository\")\n embed.add_field(name=\"Repository\", value=f\"[Go to GitHub]({URLs.github_bot_repo})\")\n embed.set_thumbnail(url=\"https://avatars1.githubusercontent.com/u/9919\")\n await ctx.send(embed=embed)\n return\n\n embed = await self.build_embed(source_item)\n await ctx.send(embed=embed)\n\n def get_source_link(self, source_item: SourceType) -> Tuple[str, str, Optional[int]]:\n \"\"\"Build GitHub link of source item, return this link, file location and first line number.\"\"\"\n if isinstance(source_item, commands.HelpCommand):\n src = type(source_item)\n filename = inspect.getsourcefile(src)\n elif isinstance(source_item, commands.Command):\n if source_item.cog_name == \"Alias\":\n cmd_name = source_item.callback.__name__.replace(\"_alias\", \"\")\n cmd = self.bot.get_command(cmd_name.replace(\"_\", \" \"))\n src = cmd.callback.__code__\n filename = src.co_filename\n else:\n src = source_item.callback.__code__\n filename = src.co_filename\n elif isinstance(source_item, str):\n tags_cog = self.bot.get_cog(\"Tags\")\n filename = tags_cog._cache[source_item][\"location\"]\n else:\n src = type(source_item)\n filename = inspect.getsourcefile(src)\n\n if not isinstance(source_item, str):\n lines, first_line_no = inspect.getsourcelines(src)\n lines_extension = f\"#L{first_line_no}-L{first_line_no+len(lines)-1}\"\n else:\n first_line_no = None\n lines_extension = \"\"\n\n # Handle tag file location differently than others to avoid errors in some cases\n if not first_line_no:\n file_location = Path(filename).relative_to(\"/bot/\")\n else:\n file_location = Path(filename).relative_to(Path.cwd()).as_posix()\n\n url = f\"{URLs.github_bot_repo}/blob/master/{file_location}{lines_extension}\"\n\n return url, file_location, first_line_no or None\n\n async def build_embed(self, source_object: SourceType) -> Optional[Embed]:\n \"\"\"Build embed based on source object.\"\"\"\n url, location, first_line = self.get_source_link(source_object)\n\n if isinstance(source_object, commands.HelpCommand):\n title = \"Help Command\"\n description = source_object.__doc__.splitlines()[1]\n elif isinstance(source_object, commands.Command):\n if source_object.cog_name == \"Alias\":\n cmd_name = source_object.callback.__name__.replace(\"_alias\", \"\")\n cmd = self.bot.get_command(cmd_name.replace(\"_\", \" \"))\n description = cmd.short_doc\n else:\n description = source_object.short_doc\n\n title = f\"Command: {source_object.qualified_name}\"\n elif isinstance(source_object, str):\n title = f\"Tag: {source_object}\"\n description = \"\"\n else:\n title = f\"Cog: {source_object.qualified_name}\"\n description = 
source_object.description.splitlines()[0]\n\n embed = Embed(title=title, description=description)\n embed.add_field(name=\"Source Code\", value=f\"[Go to GitHub]({url})\")\n line_text = f\":{first_line}\" if first_line else \"\"\n embed.set_footer(text=f\"{location}{line_text}\")\n\n return embed\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Load the BotSource cog.\"\"\"\n bot.add_cog(BotSource(bot))\n", "path": "bot/cogs/source.py"}]} | 2,261 | 423 |
gh_patches_debug_30203 | rasdani/github-patches | git_diff | Mailu__Mailu-2709 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
PROXY_AUTH_WHITELIST is validating address in REAL_IP_HEADER instead of proxy IP address
<!--
Thank you for opening an issue with Mailu. Please understand that issues are meant for bugs and enhancement-requests.
For **user-support questions**, reach out to us on [matrix](https://matrix.to/#/#mailu:tedomum.net).
To be able to help you best, we need some more information.
Before you open your issue
- Check if no issue or pull-request for this already exists.
- Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)
- You understand `Mailu` is made by volunteers in their **free time** — be concise, civil and accept that delays can occur.
- The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.
Please put your text outside of the comment blocks to be visible. You can use the button "Preview" above to check.
-->
## Environment & Version
### Environment
- [x] docker compose
- [ ] kubernetes
- [ ] docker swarm
### Version
- Version: `master`
<!--
To find your version, get the image name of a mailu container and read the version from the tag (example for version 1.7).
$> docker ps -a | grep mailu
140b09d4b09c mailu/roundcube:1.7 "docker-php-entrypoi…" 2 weeks ago Up 2 days (healthy) 80/tcp
$> grep MAILU_VERSION docker-compose.yml mailu.env
-->
## Description
<!--
Further explain the bug in a few words. It should be clear what the unexpected behaviour is. Share it in an easy-to-understand language.
-->
When using a proxy for authentication, we have the option to set proxy IPs/CIDRs to trust for authentication with the `PROXY_AUTH_WHITELIST` setting. Whenever we are using a proxy for authentication we are also supposed to set `REAL_IP_HEADER` and `REAL_IP_FROM` so mailu can determine where the request came from when the proxy intercepted it. However, when `REAL_IP_HEADER` and `REAL_IP_FROM` are set, the mailu frontend is validating the IP address in the header from the proxy, rather than the proxy IP itself, for `PROXY_AUTH_WHITELIST`.
I would guess that this has something to do with how nginx is configured for the frontend, as this only happens when BOTH `REAL_IP_FROM` and `PROXY_AUTH_WHITELIST` are configured. If only `PROXY_AUTH_WHITELIST` is set, then the user is permitted to log in and access webmail (at an increased risk of IP spoofing).
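For reference, a minimal combination of the three settings involved might look like the sketch below; the proxy address and header name are illustrative assumptions, not values taken from this deployment:
```
# mailu.env (hypothetical values for illustration only)
PROXY_AUTH_WHITELIST=10.0.0.2/32   # only the authenticating proxy may use header authentication
REAL_IP_FROM=10.0.0.2/32           # trust the same proxy to supply the real client address
REAL_IP_HEADER=X-Real-IP           # header in which the proxy passes the browser's address
```
With this combination the whitelist check should be made against the proxy's own address, not against the browser address carried in the real-IP header.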
## Replication Steps
<!--
Steps for replicating your issue
-->
1. Deploy mailu with a reverse proxy to perform authentication. I am using traefik2 for proxy and authentik for IdP.
2. Configure a user in IdP that will authenticate to mailu. Optionally enable PROXY_AUTH_CREATE in mailu config.
3. Set mailu configuration `PROXY_AUTH_WHITELIST` to use the proxy's IP address. Leave `REAL_IP_HEADER` and `REAL_IP_FROM` unset. Restart/redeploy mailu to pick up the new configuration.
4. Confirm authentication through proxy works by navigating to mailu through the proxy, using a different host / source IP address than the one used by the proxy. A successful login will load Webmail.
5. Set mailu configuration `REAL_IP_FROM` to use the proxy's IP address. Set `REAL_IP_HEADER` to a header that the proxy will set, such as `X-Real-IP`. Restart/redeploy mailu to pick up the new configuration.
6. Navigate to mailu through the proxy, using a different host / source IP address than the one used by the proxy. Complete the login if prompted.
## Observed behaviour
<!--
Explain or paste the result you received.
-->
Mailu responds with an HTTP 500 error, where `X.X.X.X` is the IP address of the host running the browser, taken from the header:
```
Internal Server Error
X.X.X.X is not on PROXY_AUTH_WHITELIST
```
## Expected behaviour
<!--
Explain what results you expected - be as specific as possible.
Just saying "it doesn’t work as expected" is not useful. It's also helpful to describe what you actually experienced.
-->
User should be permitted to login without encountering this error.
## Logs
<!--
Often it is very useful to include log fragments of the involved component.
You can get the logs via `docker logs <container name> --tail 1000`.
For example for the admin container: `docker logs mailu_admin_1 --tail 1000`
or using docker compose `docker compose -f /mailu/docker-compose.yml logs --tail 1000 admin`
If you can find the relevant section, please share only the parts that seem relevant. If you have any logs, please enclose them in code tags, like so:
```
Your logs here!
```
-->
Logs from the frontend:
```
<host IP> - - [17/Mar/2023:19:50:25 -0500] "GET /webmail HTTP/1.1" 301 162 "-" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36"
<host IP> - - [17/Mar/2023:19:50:25 -0500] "GET /webmail/ HTTP/1.1" 302 138 "-" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36"
<host IP> - - [17/Mar/2023:19:50:25 -0500] "GET /sso/login?url=/webmail/ HTTP/1.1" 500 155 "-" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36"
```
</issue>
<code>
[start of core/admin/mailu/sso/views/base.py]
1 from werkzeug.utils import redirect
2 from mailu import models, utils
3 from mailu.sso import sso, forms
4 from mailu.ui import access
5
6 from flask import current_app as app
7 import flask
8 import flask_login
9 import secrets
10 import ipaddress
11 from urllib.parse import urlparse, urljoin
12 from werkzeug.urls import url_unquote
13
14 @sso.route('/login', methods=['GET', 'POST'])
15 def login():
16 if flask.request.headers.get(app.config['PROXY_AUTH_HEADER']) and not 'noproxyauth' in flask.request.url:
17 return _proxy()
18
19 client_ip = flask.request.headers.get('X-Real-IP', flask.request.remote_addr)
20 form = forms.LoginForm()
21
22 fields = []
23
24 if 'url' in flask.request.args and not 'homepage' in flask.request.url:
25 fields.append(form.submitAdmin)
26 else:
27 form.submitAdmin.label.text = form.submitAdmin.label.text + ' Admin'
28 form.submitWebmail.label.text = form.submitWebmail.label.text + ' Webmail'
29 if str(app.config["WEBMAIL"]).upper() != "NONE":
30 fields.append(form.submitWebmail)
31 if str(app.config["ADMIN"]).upper() != "FALSE":
32 fields.append(form.submitAdmin)
33 fields = [fields]
34
35 if form.validate_on_submit():
36 if destination := _has_usable_redirect():
37 pass
38 else:
39 if form.submitAdmin.data:
40 destination = app.config['WEB_ADMIN']
41 elif form.submitWebmail.data:
42 destination = app.config['WEB_WEBMAIL']
43 device_cookie, device_cookie_username = utils.limiter.parse_device_cookie(flask.request.cookies.get('rate_limit'))
44 username = form.email.data
45 if username != device_cookie_username and utils.limiter.should_rate_limit_ip(client_ip):
46 flask.flash('Too many attempts from your IP (rate-limit)', 'error')
47 return flask.render_template('login.html', form=form, fields=fields)
48 if utils.limiter.should_rate_limit_user(username, client_ip, device_cookie, device_cookie_username):
49 flask.flash('Too many attempts for this user (rate-limit)', 'error')
50 return flask.render_template('login.html', form=form, fields=fields)
51 user = models.User.login(username, form.pw.data)
52 if user:
53 flask.session.regenerate()
54 flask_login.login_user(user)
55 response = flask.redirect(destination)
56 response.set_cookie('rate_limit', utils.limiter.device_cookie(username), max_age=31536000, path=flask.url_for('sso.login'), secure=app.config['SESSION_COOKIE_SECURE'], httponly=True)
57 flask.current_app.logger.info(f'Login succeeded for {username} from {client_ip} pwned={form.pwned.data}.')
58 if msg := utils.isBadOrPwned(form):
59 flask.flash(msg, "error")
60 return response
61 else:
62 utils.limiter.rate_limit_user(username, client_ip, device_cookie, device_cookie_username) if models.User.get(username) else utils.limiter.rate_limit_ip(client_ip, username)
63 flask.current_app.logger.warn(f'Login failed for {username} from {client_ip}.')
64 flask.flash('Wrong e-mail or password', 'error')
65 return flask.render_template('login.html', form=form, fields=fields)
66
67 @sso.route('/logout', methods=['GET'])
68 @access.authenticated
69 def logout():
70 flask_login.logout_user()
71 flask.session.destroy()
72 response = flask.redirect(app.config['PROXY_AUTH_LOGOUT_URL'] or flask.url_for('.login'))
73 for cookie in ['roundcube_sessauth', 'roundcube_sessid', 'smsession']:
74 response.set_cookie(cookie, 'empty', expires=0)
75 return response
76
77 """
78 Redirect to the url passed in parameter if any; Ensure that this is not an open-redirect too...
79 https://cheatsheetseries.owasp.org/cheatsheets/Unvalidated_Redirects_and_Forwards_Cheat_Sheet.html
80 """
81 def _has_usable_redirect():
82 if 'homepage' in flask.request.url:
83 return None
84 if url := flask.request.args.get('url'):
85 url = url_unquote(url)
86 target = urlparse(urljoin(flask.request.url, url))
87 if target.netloc == urlparse(flask.request.url).netloc:
88 return target.geturl()
89 return None
90
91 """
92 https://mailu.io/master/configuration.html#header-authentication-using-an-external-proxy
93 """
94 def _proxy():
95 ip = ipaddress.ip_address(flask.request.remote_addr)
96 if not any(ip in cidr for cidr in app.config['PROXY_AUTH_WHITELIST']):
97 return flask.abort(500, '%s is not on PROXY_AUTH_WHITELIST' % flask.request.remote_addr)
98
99 email = flask.request.headers.get(app.config['PROXY_AUTH_HEADER'])
100 if not email:
101 return flask.abort(500, 'No %s header' % app.config['PROXY_AUTH_HEADER'])
102
103 url = _has_usable_redirect() or app.config['WEB_ADMIN']
104
105 user = models.User.get(email)
106 if user:
107 flask.session.regenerate()
108 flask_login.login_user(user)
109 return flask.redirect(url)
110
111 if not app.config['PROXY_AUTH_CREATE']:
112 return flask.abort(500, 'You don\'t exist. Go away! (%s)' % email)
113
114 client_ip = flask.request.headers.get('X-Real-IP', flask.request.remote_addr)
115 try:
116 localpart, desireddomain = email.rsplit('@')
117 except Exception as e:
118 flask.current_app.logger.error('Error creating a new user via proxy for %s from %s: %s' % (email, client_ip, str(e)), e)
119 return flask.abort(500, 'You don\'t exist. Go away! (%s)' % email)
120 domain = models.Domain.query.get(desireddomain) or flask.abort(500, 'You don\'t exist. Go away! (domain=%s)' % desireddomain)
121 if not domain.max_users == -1 and len(domain.users) >= domain.max_users:
122 flask.current_app.logger.warning('Too many users for domain %s' % domain)
123 return flask.abort(500, 'Too many users in (domain=%s)' % domain)
124 user = models.User(localpart=localpart, domain=domain)
125 user.set_password(secrets.token_urlsafe())
126 models.db.session.add(user)
127 models.db.session.commit()
128 flask.session.regenerate()
129 flask_login.login_user(user)
130 user.send_welcome()
131 flask.current_app.logger.info(f'Login succeeded by proxy created user: {user} from {client_ip} through {flask.request.remote_addr}.')
132 return flask.redirect(url)
133
[end of core/admin/mailu/sso/views/base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/core/admin/mailu/sso/views/base.py b/core/admin/mailu/sso/views/base.py
--- a/core/admin/mailu/sso/views/base.py
+++ b/core/admin/mailu/sso/views/base.py
@@ -78,8 +78,8 @@
Redirect to the url passed in parameter if any; Ensure that this is not an open-redirect too...
https://cheatsheetseries.owasp.org/cheatsheets/Unvalidated_Redirects_and_Forwards_Cheat_Sheet.html
"""
-def _has_usable_redirect():
- if 'homepage' in flask.request.url:
+def _has_usable_redirect(is_proxied=False):
+ if 'homepage' in flask.request.url and not is_proxied:
return None
if url := flask.request.args.get('url'):
url = url_unquote(url)
@@ -92,15 +92,16 @@
https://mailu.io/master/configuration.html#header-authentication-using-an-external-proxy
"""
def _proxy():
- ip = ipaddress.ip_address(flask.request.remote_addr)
+ proxy_ip = flask.request.headers.get('X-Forwarded-By', flask.request.remote_addr)
+ ip = ipaddress.ip_address(proxy_ip)
if not any(ip in cidr for cidr in app.config['PROXY_AUTH_WHITELIST']):
- return flask.abort(500, '%s is not on PROXY_AUTH_WHITELIST' % flask.request.remote_addr)
+ return flask.abort(500, '%s is not on PROXY_AUTH_WHITELIST' % proxy_ip)
email = flask.request.headers.get(app.config['PROXY_AUTH_HEADER'])
if not email:
return flask.abort(500, 'No %s header' % app.config['PROXY_AUTH_HEADER'])
- url = _has_usable_redirect() or app.config['WEB_ADMIN']
+ url = _has_usable_redirect(True) or app.config['WEB_ADMIN']
user = models.User.get(email)
if user:
| {"golden_diff": "diff --git a/core/admin/mailu/sso/views/base.py b/core/admin/mailu/sso/views/base.py\n--- a/core/admin/mailu/sso/views/base.py\n+++ b/core/admin/mailu/sso/views/base.py\n@@ -78,8 +78,8 @@\n Redirect to the url passed in parameter if any; Ensure that this is not an open-redirect too...\n https://cheatsheetseries.owasp.org/cheatsheets/Unvalidated_Redirects_and_Forwards_Cheat_Sheet.html\n \"\"\"\n-def _has_usable_redirect():\n- if 'homepage' in flask.request.url:\n+def _has_usable_redirect(is_proxied=False):\n+ if 'homepage' in flask.request.url and not is_proxied:\n return None\n if url := flask.request.args.get('url'):\n url = url_unquote(url)\n@@ -92,15 +92,16 @@\n https://mailu.io/master/configuration.html#header-authentication-using-an-external-proxy\n \"\"\"\n def _proxy():\n- ip = ipaddress.ip_address(flask.request.remote_addr)\n+ proxy_ip = flask.request.headers.get('X-Forwarded-By', flask.request.remote_addr)\n+ ip = ipaddress.ip_address(proxy_ip)\n if not any(ip in cidr for cidr in app.config['PROXY_AUTH_WHITELIST']):\n- return flask.abort(500, '%s is not on PROXY_AUTH_WHITELIST' % flask.request.remote_addr)\n+ return flask.abort(500, '%s is not on PROXY_AUTH_WHITELIST' % proxy_ip)\n \n email = flask.request.headers.get(app.config['PROXY_AUTH_HEADER'])\n if not email:\n return flask.abort(500, 'No %s header' % app.config['PROXY_AUTH_HEADER'])\n \n- url = _has_usable_redirect() or app.config['WEB_ADMIN']\n+ url = _has_usable_redirect(True) or app.config['WEB_ADMIN']\n \n user = models.User.get(email)\n if user:\n", "issue": "PROXY_AUTH_WHITELIST is validating address in REAL_IP_HEADER instead of proxy IP address\n<!--\r\n\r\nThank you for opening an issue with Mailu. Please understand that issues are meant for bugs and enhancement-requests.\r\nFor **user-support questions**, reach out to us on [matrix](https://matrix.to/#/#mailu:tedomum.net).\r\n\r\nTo be able to help you best, we need some more information.\r\n\r\nBefore you open your issue\r\n- Check if no issue or pull-request for this already exists.\r\n- Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)\r\n- You understand `Mailu` is made by volunteers in their **free time** \u2014 be concise, civil and accept that delays can occur.\r\n- The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.\r\n\r\nPlease put your text outside of the comment blocks to be visible. You can use the button \"Preview\" above to check.\r\n\r\n-->\r\n\r\n## Environment & Version\r\n\r\n### Environment\r\n\r\n- [x] docker compose\r\n- [ ] kubernetes\r\n- [ ] docker swarm\r\n\r\n### Version\r\n\r\n- Version: `master`\r\n\r\n<!--\r\nTo find your version, get the image name of a mailu container and read the version from the tag (example for version 1.7).\r\n\r\n$> docker ps -a | grep mailu\r\n140b09d4b09c mailu/roundcube:1.7 \"docker-php-entrypoi\u2026\" 2 weeks ago Up 2 days (healthy) 80/tcp\r\n$> grep MAILU_VERSION docker-compose.yml mailu.env\r\n-->\r\n\r\n## Description\r\n<!--\r\nFurther explain the bug in a few words. It should be clear what the unexpected behaviour is. Share it in an easy-to-understand language.\r\n-->\r\nWhen using a proxy for authentication, we have the option to set proxy IPs/CIDRs to trust for authentication with `PROXY_AUTH_WHITELIST` setting. 
Whenever we are using a proxy for authentication we are also supposed to set `REAL_IP_HEADER` and `REAL_IP_FROM` so mailu can determine where the request came from when the proxy intercepted it. However, when `REAL_IP_HEADER` and `REAL_IP_FROM` are set, the mailu frontend is validating the IP address in the header from the proxy, rather than the proxy IP itself, for `PROXY_AUTH_WHITELIST`.\r\n\r\nI would guess that this has something to do with how nginx is configured for the frontend, as this only happens when BOTH `REAL_IP_FROM` and `PROXY_AUTH_WHITELIST` are configured. If only `PROXY_AUTH_WHITELIST` is set, then the user is permitted to login and access webmail (at increased security risk of IP spoofing).\r\n\r\n## Replication Steps\r\n<!--\r\nSteps for replicating your issue\r\n-->\r\n1. Deploy mailu with a reverse proxy to perform authentication. I am using traefik2 for proxy and authentik for IdP.\r\n2. Configure a user in IdP that will authenticate to mailu. Optionally enable PROXY_AUTH_CREATE in mailu config.\r\n3. Set mailu configuration `PROXY_AUTH_WHITELIST` to use the proxy's IP address. Leave `REAL_IP_HEADER` and `REAL_IP_FROM` unset. Restart/redeploy mailu to pick up the new configuration.\r\n4. Confirm authentication through proxy works by navigating to mailu through the proxy, using a different host / source IP address than the one used by the proxy. A successful login will load Webmail.\r\n5. Set mailu configuration `REAL_IP_FROM` to use the proxy's IP address. Set `REAL_IP_HEADER` to a header that the proxy will set, such as `X-Real-IP`. Restart/redeploy mailu to pick up the new configuration.\r\n6. Navigate to mailu through the proxy, using a different host / source IP address than the one used by the proxy. Complete the login if prompted.\r\n\r\n## Observed behaviour\r\n<!--\r\nExplain or paste the result you received.\r\n-->\r\nMailu returns with HTTP 500 error, where `X.X.X.X` is the IP address of the host running the browser, found from the header:\r\n```\r\nInternal Server Error\r\nX.X.X.X is not on PROXY_AUTH_WHITELIST\r\n```\r\n\r\n## Expected behaviour\r\n<!--\r\nExplain what results you expected - be as specific as possible.\r\nJust saying \"it doesn\u2019t work as expected\" is not useful. It's also helpful to describe what you actually experienced.\r\n-->\r\nUser should be permitted to login without encountering this error.\r\n\r\n## Logs\r\n<!--\r\nOften it is very useful to include log fragments of the involved component.\r\nYou can get the logs via `docker logs <container name> --tail 1000`.\r\nFor example for the admin container: `docker logs mailu_admin_1 --tail 1000`\r\nor using docker compose `docker compose -f /mailu/docker-compose.yml logs --tail 1000 admin`\r\n\r\nIf you can find the relevant section, please share only the parts that seem relevant. 
If you have any logs, please enclose them in code tags, like so:\r\n\r\n```\r\nYour logs here!\r\n```\r\n-->\r\nLogs from the frontend:\r\n```\r\n<host IP> - - [17/Mar/2023:19:50:25 -0500] \"GET /webmail HTTP/1.1\" 301 162 \"-\" \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36\"\r\n<host IP> - - [17/Mar/2023:19:50:25 -0500] \"GET /webmail/ HTTP/1.1\" 302 138 \"-\" \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36\"\r\n<host IP> - - [17/Mar/2023:19:50:25 -0500] \"GET /sso/login?url=/webmail/ HTTP/1.1\" 500 155 \"-\" \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36\"\r\n```\n", "before_files": [{"content": "from werkzeug.utils import redirect\nfrom mailu import models, utils\nfrom mailu.sso import sso, forms\nfrom mailu.ui import access\n\nfrom flask import current_app as app\nimport flask\nimport flask_login\nimport secrets\nimport ipaddress\nfrom urllib.parse import urlparse, urljoin\nfrom werkzeug.urls import url_unquote\n\[email protected]('/login', methods=['GET', 'POST'])\ndef login():\n if flask.request.headers.get(app.config['PROXY_AUTH_HEADER']) and not 'noproxyauth' in flask.request.url:\n return _proxy()\n\n client_ip = flask.request.headers.get('X-Real-IP', flask.request.remote_addr)\n form = forms.LoginForm()\n\n fields = []\n\n if 'url' in flask.request.args and not 'homepage' in flask.request.url:\n fields.append(form.submitAdmin)\n else:\n form.submitAdmin.label.text = form.submitAdmin.label.text + ' Admin'\n form.submitWebmail.label.text = form.submitWebmail.label.text + ' Webmail'\n if str(app.config[\"WEBMAIL\"]).upper() != \"NONE\":\n fields.append(form.submitWebmail)\n if str(app.config[\"ADMIN\"]).upper() != \"FALSE\":\n fields.append(form.submitAdmin)\n fields = [fields]\n\n if form.validate_on_submit():\n if destination := _has_usable_redirect():\n pass\n else:\n if form.submitAdmin.data:\n destination = app.config['WEB_ADMIN']\n elif form.submitWebmail.data:\n destination = app.config['WEB_WEBMAIL']\n device_cookie, device_cookie_username = utils.limiter.parse_device_cookie(flask.request.cookies.get('rate_limit'))\n username = form.email.data\n if username != device_cookie_username and utils.limiter.should_rate_limit_ip(client_ip):\n flask.flash('Too many attempts from your IP (rate-limit)', 'error')\n return flask.render_template('login.html', form=form, fields=fields)\n if utils.limiter.should_rate_limit_user(username, client_ip, device_cookie, device_cookie_username):\n flask.flash('Too many attempts for this user (rate-limit)', 'error')\n return flask.render_template('login.html', form=form, fields=fields)\n user = models.User.login(username, form.pw.data)\n if user:\n flask.session.regenerate()\n flask_login.login_user(user)\n response = flask.redirect(destination)\n response.set_cookie('rate_limit', utils.limiter.device_cookie(username), max_age=31536000, path=flask.url_for('sso.login'), secure=app.config['SESSION_COOKIE_SECURE'], httponly=True)\n flask.current_app.logger.info(f'Login succeeded for {username} from {client_ip} pwned={form.pwned.data}.')\n if msg := utils.isBadOrPwned(form):\n flask.flash(msg, \"error\")\n return response\n else:\n utils.limiter.rate_limit_user(username, client_ip, device_cookie, device_cookie_username) if models.User.get(username) else utils.limiter.rate_limit_ip(client_ip, username)\n flask.current_app.logger.warn(f'Login failed for 
{username} from {client_ip}.')\n flask.flash('Wrong e-mail or password', 'error')\n return flask.render_template('login.html', form=form, fields=fields)\n\[email protected]('/logout', methods=['GET'])\[email protected]\ndef logout():\n flask_login.logout_user()\n flask.session.destroy()\n response = flask.redirect(app.config['PROXY_AUTH_LOGOUT_URL'] or flask.url_for('.login'))\n for cookie in ['roundcube_sessauth', 'roundcube_sessid', 'smsession']:\n response.set_cookie(cookie, 'empty', expires=0)\n return response\n\n\"\"\"\nRedirect to the url passed in parameter if any; Ensure that this is not an open-redirect too...\nhttps://cheatsheetseries.owasp.org/cheatsheets/Unvalidated_Redirects_and_Forwards_Cheat_Sheet.html\n\"\"\"\ndef _has_usable_redirect():\n if 'homepage' in flask.request.url:\n return None\n if url := flask.request.args.get('url'):\n url = url_unquote(url)\n target = urlparse(urljoin(flask.request.url, url))\n if target.netloc == urlparse(flask.request.url).netloc:\n return target.geturl()\n return None\n\n\"\"\"\nhttps://mailu.io/master/configuration.html#header-authentication-using-an-external-proxy\n\"\"\"\ndef _proxy():\n ip = ipaddress.ip_address(flask.request.remote_addr)\n if not any(ip in cidr for cidr in app.config['PROXY_AUTH_WHITELIST']):\n return flask.abort(500, '%s is not on PROXY_AUTH_WHITELIST' % flask.request.remote_addr)\n\n email = flask.request.headers.get(app.config['PROXY_AUTH_HEADER'])\n if not email:\n return flask.abort(500, 'No %s header' % app.config['PROXY_AUTH_HEADER'])\n\n url = _has_usable_redirect() or app.config['WEB_ADMIN']\n\n user = models.User.get(email)\n if user:\n flask.session.regenerate()\n flask_login.login_user(user)\n return flask.redirect(url)\n\n if not app.config['PROXY_AUTH_CREATE']:\n return flask.abort(500, 'You don\\'t exist. Go away! (%s)' % email)\n\n client_ip = flask.request.headers.get('X-Real-IP', flask.request.remote_addr)\n try:\n localpart, desireddomain = email.rsplit('@')\n except Exception as e:\n flask.current_app.logger.error('Error creating a new user via proxy for %s from %s: %s' % (email, client_ip, str(e)), e)\n return flask.abort(500, 'You don\\'t exist. Go away! (%s)' % email)\n domain = models.Domain.query.get(desireddomain) or flask.abort(500, 'You don\\'t exist. Go away! (domain=%s)' % desireddomain)\n if not domain.max_users == -1 and len(domain.users) >= domain.max_users:\n flask.current_app.logger.warning('Too many users for domain %s' % domain)\n return flask.abort(500, 'Too many users in (domain=%s)' % domain)\n user = models.User(localpart=localpart, domain=domain)\n user.set_password(secrets.token_urlsafe())\n models.db.session.add(user)\n models.db.session.commit()\n flask.session.regenerate()\n flask_login.login_user(user)\n user.send_welcome()\n flask.current_app.logger.info(f'Login succeeded by proxy created user: {user} from {client_ip} through {flask.request.remote_addr}.')\n return flask.redirect(url)\n", "path": "core/admin/mailu/sso/views/base.py"}]} | 3,738 | 439 |
gh_patches_debug_28883 | rasdani/github-patches | git_diff | python-discord__bot-219 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
It should be mandatory to add a reason for !bb watch
Leaving it optional means we don't always know why someone was watched. This is important information, so we should make it mandatory instead.
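For illustration, the change boils down to removing the default on the command's keyword-only argument; this is a sketch against discord.py's commands extension, not the bot's actual cog code:
```python
from discord import User
from discord.ext import commands

# Hypothetical sketch: a keyword-only parameter without a default is treated as
# required by discord.py's argument parser, so `!bb watch @user` with no reason
# raises MissingRequiredArgument instead of silently proceeding with reason=None.
@commands.command(name="watch", aliases=("w",))
async def watch_command(ctx: commands.Context, user: User, *, reason: str):
    await ctx.send(f"Now watching {user} (reason: {reason})")
```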
</issue>
<code>
[start of bot/cogs/bigbrother.py]
1 import asyncio
2 import logging
3 import re
4 from collections import defaultdict, deque
5 from typing import List, Union
6
7 from discord import Color, Embed, Guild, Member, Message, TextChannel, User
8 from discord.ext.commands import Bot, Context, group
9
10 from bot.constants import BigBrother as BigBrotherConfig, Channels, Emojis, Guild as GuildConfig, Keys, Roles, URLs
11 from bot.decorators import with_role
12 from bot.pagination import LinePaginator
13 from bot.utils import messages
14 from bot.utils.moderation import post_infraction
15
16 log = logging.getLogger(__name__)
17
18 URL_RE = re.compile(r"(https?://[^\s]+)")
19
20
21 class BigBrother:
22 """User monitoring to assist with moderation."""
23
24 HEADERS = {'X-API-Key': Keys.site_api}
25
26 def __init__(self, bot: Bot):
27 self.bot = bot
28 self.watched_users = {} # { user_id: log_channel_id }
29 self.channel_queues = defaultdict(lambda: defaultdict(deque)) # { user_id: { channel_id: queue(messages) }
30 self.last_log = [None, None, 0] # [user_id, channel_id, message_count]
31 self.consuming = False
32
33 self.bot.loop.create_task(self.get_watched_users())
34
35 def update_cache(self, api_response: List[dict]):
36 """
37 Updates the internal cache of watched users from the given `api_response`.
38 This function will only add (or update) existing keys, it will not delete
39 keys that were not present in the API response.
40 A user is only added if the bot can find a channel
41 with the given `channel_id` in its channel cache.
42 """
43
44 for entry in api_response:
45 user_id = int(entry['user_id'])
46 channel_id = int(entry['channel_id'])
47 channel = self.bot.get_channel(channel_id)
48
49 if channel is not None:
50 self.watched_users[user_id] = channel
51 else:
52 log.error(
53 f"Site specified to relay messages by `{user_id}` in `{channel_id}`, "
54 "but the given channel could not be found. Ignoring."
55 )
56
57 async def get_watched_users(self):
58 """Retrieves watched users from the API."""
59
60 await self.bot.wait_until_ready()
61 async with self.bot.http_session.get(URLs.site_bigbrother_api, headers=self.HEADERS) as response:
62 data = await response.json()
63 self.update_cache(data)
64
65 async def on_member_ban(self, guild: Guild, user: Union[User, Member]):
66 if guild.id == GuildConfig.id and user.id in self.watched_users:
67 url = f"{URLs.site_bigbrother_api}?user_id={user.id}"
68 channel = self.watched_users[user.id]
69
70 async with self.bot.http_session.delete(url, headers=self.HEADERS) as response:
71 del self.watched_users[user.id]
72 del self.channel_queues[user.id]
73 if response.status == 204:
74 await channel.send(
75 f"{Emojis.bb_message}:hammer: {user} got banned, so "
76 f"`BigBrother` will no longer relay their messages to {channel}"
77 )
78
79 else:
80 data = await response.json()
81 reason = data.get('error_message', "no message provided")
82 await channel.send(
83 f"{Emojis.bb_message}:x: {user} got banned, but trying to remove them from"
84 f"BigBrother's user dictionary on the API returned an error: {reason}"
85 )
86
87 async def on_message(self, msg: Message):
88 """Queues up messages sent by watched users."""
89
90 if msg.author.id in self.watched_users:
91 if not self.consuming:
92 self.bot.loop.create_task(self.consume_messages())
93
94 log.trace(f"Received message: {msg.content} ({len(msg.attachments)} attachments)")
95 self.channel_queues[msg.author.id][msg.channel.id].append(msg)
96
97 async def consume_messages(self):
98 """Consumes the message queues to log watched users' messages."""
99
100 if not self.consuming:
101 self.consuming = True
102 log.trace("Sleeping before consuming...")
103 await asyncio.sleep(BigBrotherConfig.log_delay)
104
105 log.trace("Begin consuming messages.")
106 channel_queues = self.channel_queues.copy()
107 self.channel_queues.clear()
108 for user_id, queues in channel_queues.items():
109 for _, queue in queues.items():
110 channel = self.watched_users[user_id]
111 while queue:
112 msg = queue.popleft()
113 log.trace(f"Consuming message: {msg.clean_content} ({len(msg.attachments)} attachments)")
114
115 self.last_log[2] += 1 # Increment message count.
116 await self.send_header(msg, channel)
117 await self.log_message(msg, channel)
118
119 if self.channel_queues:
120 log.trace("Queue not empty; continue consumption.")
121 self.bot.loop.create_task(self.consume_messages())
122 else:
123 log.trace("Done consuming messages.")
124 self.consuming = False
125
126 async def send_header(self, message: Message, destination: TextChannel):
127 """
128 Sends a log message header to the given channel.
129
130 A header is only sent if the user or channel are different than the previous, or if the configured message
131 limit for a single header has been exceeded.
132
133 :param message: the first message in the queue
134 :param destination: the channel in which to send the header
135 """
136
137 last_user, last_channel, msg_count = self.last_log
138 limit = BigBrotherConfig.header_message_limit
139
140 # Send header if user/channel are different or if message limit exceeded.
141 if message.author.id != last_user or message.channel.id != last_channel or msg_count > limit:
142 self.last_log = [message.author.id, message.channel.id, 0]
143
144 embed = Embed(description=f"{message.author.mention} in [#{message.channel.name}]({message.jump_url})")
145 embed.set_author(name=message.author.nick or message.author.name, icon_url=message.author.avatar_url)
146 await destination.send(embed=embed)
147
148 @staticmethod
149 async def log_message(message: Message, destination: TextChannel):
150 """
151 Logs a watched user's message in the given channel.
152
153 Attachments are also sent. All non-image or non-video URLs are put in inline code blocks to prevent preview
154 embeds from being automatically generated.
155
156 :param message: the message to log
157 :param destination: the channel in which to log the message
158 """
159
160 content = message.clean_content
161 if content:
162 # Put all non-media URLs in inline code blocks.
163 media_urls = {embed.url for embed in message.embeds if embed.type in ("image", "video")}
164 for url in URL_RE.findall(content):
165 if url not in media_urls:
166 content = content.replace(url, f"`{url}`")
167
168 await destination.send(content)
169
170 await messages.send_attachments(message, destination)
171
172 @group(name='bigbrother', aliases=('bb',), invoke_without_command=True)
173 @with_role(Roles.owner, Roles.admin, Roles.moderator)
174 async def bigbrother_group(self, ctx: Context):
175 """Monitor users, NSA-style."""
176
177 await ctx.invoke(self.bot.get_command("help"), "bigbrother")
178
179 @bigbrother_group.command(name='watched', aliases=('all',))
180 @with_role(Roles.owner, Roles.admin, Roles.moderator)
181 async def watched_command(self, ctx: Context, from_cache: bool = True):
182 """
183 Shows all users that are currently monitored and in which channel.
184 By default, the users are returned from the cache.
185 If this is not desired, `from_cache` can be given as a falsy value, e.g. e.g. 'no'.
186 """
187
188 if from_cache:
189 lines = tuple(
190 f"• <@{user_id}> in <#{self.watched_users[user_id].id}>"
191 for user_id in self.watched_users
192 )
193 await LinePaginator.paginate(
194 lines or ("There's nothing here yet.",),
195 ctx,
196 Embed(title="Watched users (cached)", color=Color.blue()),
197 empty=False
198 )
199
200 else:
201 async with self.bot.http_session.get(URLs.site_bigbrother_api, headers=self.HEADERS) as response:
202 if response.status == 200:
203 data = await response.json()
204 self.update_cache(data)
205 lines = tuple(f"• <@{entry['user_id']}> in <#{entry['channel_id']}>" for entry in data)
206
207 await LinePaginator.paginate(
208 lines or ("There's nothing here yet.",),
209 ctx,
210 Embed(title="Watched users", color=Color.blue()),
211 empty=False
212 )
213
214 else:
215 await ctx.send(f":x: got non-200 response from the API")
216
217 @bigbrother_group.command(name='watch', aliases=('w',))
218 @with_role(Roles.owner, Roles.admin, Roles.moderator)
219 async def watch_command(self, ctx: Context, user: User, *, reason: str = None):
220 """
221 Relay messages sent by the given `user` to the `#big-brother-logs` channel
222
223 If a `reason` is specified, a note is added for `user`
224 """
225
226 channel_id = Channels.big_brother_logs
227
228 post_data = {
229 'user_id': str(user.id),
230 'channel_id': str(channel_id)
231 }
232
233 async with self.bot.http_session.post(
234 URLs.site_bigbrother_api,
235 headers=self.HEADERS,
236 json=post_data
237 ) as response:
238 if response.status == 204:
239 await ctx.send(f":ok_hand: will now relay messages sent by {user} in <#{channel_id}>")
240
241 channel = self.bot.get_channel(channel_id)
242 if channel is None:
243 log.error(
244 f"could not update internal cache, failed to find a channel with ID {channel_id}"
245 )
246 else:
247 self.watched_users[user.id] = channel
248
249 else:
250 data = await response.json()
251 reason = data.get('error_message', "no message provided")
252 await ctx.send(f":x: the API returned an error: {reason}")
253
254 # Add a note (shadow warning) if a reason is specified
255 if reason:
256 reason = "bb watch: " + reason # Prepend for situational awareness
257 await post_infraction(ctx, user, type="warning", reason=reason, hidden=True)
258
259 @bigbrother_group.command(name='unwatch', aliases=('uw',))
260 @with_role(Roles.owner, Roles.admin, Roles.moderator)
261 async def unwatch_command(self, ctx: Context, user: User):
262 """Stop relaying messages by the given `user`."""
263
264 url = f"{URLs.site_bigbrother_api}?user_id={user.id}"
265 async with self.bot.http_session.delete(url, headers=self.HEADERS) as response:
266 if response.status == 204:
267 await ctx.send(f":ok_hand: will no longer relay messages sent by {user}")
268
269 if user.id in self.watched_users:
270 del self.watched_users[user.id]
271 if user.id in self.channel_queues:
272 del self.channel_queues[user.id]
273 else:
274 log.warning(f"user {user.id} was unwatched but was not found in the cache")
275
276 else:
277 data = await response.json()
278 reason = data.get('error_message', "no message provided")
279 await ctx.send(f":x: the API returned an error: {reason}")
280
281
282 def setup(bot: Bot):
283 bot.add_cog(BigBrother(bot))
284 log.info("Cog loaded: BigBrother")
285
[end of bot/cogs/bigbrother.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bot/cogs/bigbrother.py b/bot/cogs/bigbrother.py
--- a/bot/cogs/bigbrother.py
+++ b/bot/cogs/bigbrother.py
@@ -216,11 +216,12 @@
@bigbrother_group.command(name='watch', aliases=('w',))
@with_role(Roles.owner, Roles.admin, Roles.moderator)
- async def watch_command(self, ctx: Context, user: User, *, reason: str = None):
+ async def watch_command(self, ctx: Context, user: User, *, reason: str):
"""
Relay messages sent by the given `user` to the `#big-brother-logs` channel
- If a `reason` is specified, a note is added for `user`
+ A `reason` for watching is required, which is added for the user to be watched as a
+ note (aka: shadow warning)
"""
channel_id = Channels.big_brother_logs
@@ -251,10 +252,9 @@
reason = data.get('error_message', "no message provided")
await ctx.send(f":x: the API returned an error: {reason}")
- # Add a note (shadow warning) if a reason is specified
- if reason:
- reason = "bb watch: " + reason # Prepend for situational awareness
- await post_infraction(ctx, user, type="warning", reason=reason, hidden=True)
+ # Add a note (shadow warning) with the reason for watching
+ reason = "bb watch: " + reason # Prepend for situational awareness
+ await post_infraction(ctx, user, type="warning", reason=reason, hidden=True)
@bigbrother_group.command(name='unwatch', aliases=('uw',))
@with_role(Roles.owner, Roles.admin, Roles.moderator)
| {"golden_diff": "diff --git a/bot/cogs/bigbrother.py b/bot/cogs/bigbrother.py\n--- a/bot/cogs/bigbrother.py\n+++ b/bot/cogs/bigbrother.py\n@@ -216,11 +216,12 @@\n \n @bigbrother_group.command(name='watch', aliases=('w',))\n @with_role(Roles.owner, Roles.admin, Roles.moderator)\n- async def watch_command(self, ctx: Context, user: User, *, reason: str = None):\n+ async def watch_command(self, ctx: Context, user: User, *, reason: str):\n \"\"\"\n Relay messages sent by the given `user` to the `#big-brother-logs` channel\n \n- If a `reason` is specified, a note is added for `user`\n+ A `reason` for watching is required, which is added for the user to be watched as a\n+ note (aka: shadow warning)\n \"\"\"\n \n channel_id = Channels.big_brother_logs\n@@ -251,10 +252,9 @@\n reason = data.get('error_message', \"no message provided\")\n await ctx.send(f\":x: the API returned an error: {reason}\")\n \n- # Add a note (shadow warning) if a reason is specified\n- if reason:\n- reason = \"bb watch: \" + reason # Prepend for situational awareness\n- await post_infraction(ctx, user, type=\"warning\", reason=reason, hidden=True)\n+ # Add a note (shadow warning) with the reason for watching\n+ reason = \"bb watch: \" + reason # Prepend for situational awareness\n+ await post_infraction(ctx, user, type=\"warning\", reason=reason, hidden=True)\n \n @bigbrother_group.command(name='unwatch', aliases=('uw',))\n @with_role(Roles.owner, Roles.admin, Roles.moderator)\n", "issue": "It should be mandatory to add a reason for !bb watch\nLeaving it optional means we don't always know why someone was watched. This is important information, so we should make it mandatory instead.\n", "before_files": [{"content": "import asyncio\nimport logging\nimport re\nfrom collections import defaultdict, deque\nfrom typing import List, Union\n\nfrom discord import Color, Embed, Guild, Member, Message, TextChannel, User\nfrom discord.ext.commands import Bot, Context, group\n\nfrom bot.constants import BigBrother as BigBrotherConfig, Channels, Emojis, Guild as GuildConfig, Keys, Roles, URLs\nfrom bot.decorators import with_role\nfrom bot.pagination import LinePaginator\nfrom bot.utils import messages\nfrom bot.utils.moderation import post_infraction\n\nlog = logging.getLogger(__name__)\n\nURL_RE = re.compile(r\"(https?://[^\\s]+)\")\n\n\nclass BigBrother:\n \"\"\"User monitoring to assist with moderation.\"\"\"\n\n HEADERS = {'X-API-Key': Keys.site_api}\n\n def __init__(self, bot: Bot):\n self.bot = bot\n self.watched_users = {} # { user_id: log_channel_id }\n self.channel_queues = defaultdict(lambda: defaultdict(deque)) # { user_id: { channel_id: queue(messages) }\n self.last_log = [None, None, 0] # [user_id, channel_id, message_count]\n self.consuming = False\n\n self.bot.loop.create_task(self.get_watched_users())\n\n def update_cache(self, api_response: List[dict]):\n \"\"\"\n Updates the internal cache of watched users from the given `api_response`.\n This function will only add (or update) existing keys, it will not delete\n keys that were not present in the API response.\n A user is only added if the bot can find a channel\n with the given `channel_id` in its channel cache.\n \"\"\"\n\n for entry in api_response:\n user_id = int(entry['user_id'])\n channel_id = int(entry['channel_id'])\n channel = self.bot.get_channel(channel_id)\n\n if channel is not None:\n self.watched_users[user_id] = channel\n else:\n log.error(\n f\"Site specified to relay messages by `{user_id}` in `{channel_id}`, \"\n \"but the given channel could not 
be found. Ignoring.\"\n )\n\n async def get_watched_users(self):\n \"\"\"Retrieves watched users from the API.\"\"\"\n\n await self.bot.wait_until_ready()\n async with self.bot.http_session.get(URLs.site_bigbrother_api, headers=self.HEADERS) as response:\n data = await response.json()\n self.update_cache(data)\n\n async def on_member_ban(self, guild: Guild, user: Union[User, Member]):\n if guild.id == GuildConfig.id and user.id in self.watched_users:\n url = f\"{URLs.site_bigbrother_api}?user_id={user.id}\"\n channel = self.watched_users[user.id]\n\n async with self.bot.http_session.delete(url, headers=self.HEADERS) as response:\n del self.watched_users[user.id]\n del self.channel_queues[user.id]\n if response.status == 204:\n await channel.send(\n f\"{Emojis.bb_message}:hammer: {user} got banned, so \"\n f\"`BigBrother` will no longer relay their messages to {channel}\"\n )\n\n else:\n data = await response.json()\n reason = data.get('error_message', \"no message provided\")\n await channel.send(\n f\"{Emojis.bb_message}:x: {user} got banned, but trying to remove them from\"\n f\"BigBrother's user dictionary on the API returned an error: {reason}\"\n )\n\n async def on_message(self, msg: Message):\n \"\"\"Queues up messages sent by watched users.\"\"\"\n\n if msg.author.id in self.watched_users:\n if not self.consuming:\n self.bot.loop.create_task(self.consume_messages())\n\n log.trace(f\"Received message: {msg.content} ({len(msg.attachments)} attachments)\")\n self.channel_queues[msg.author.id][msg.channel.id].append(msg)\n\n async def consume_messages(self):\n \"\"\"Consumes the message queues to log watched users' messages.\"\"\"\n\n if not self.consuming:\n self.consuming = True\n log.trace(\"Sleeping before consuming...\")\n await asyncio.sleep(BigBrotherConfig.log_delay)\n\n log.trace(\"Begin consuming messages.\")\n channel_queues = self.channel_queues.copy()\n self.channel_queues.clear()\n for user_id, queues in channel_queues.items():\n for _, queue in queues.items():\n channel = self.watched_users[user_id]\n while queue:\n msg = queue.popleft()\n log.trace(f\"Consuming message: {msg.clean_content} ({len(msg.attachments)} attachments)\")\n\n self.last_log[2] += 1 # Increment message count.\n await self.send_header(msg, channel)\n await self.log_message(msg, channel)\n\n if self.channel_queues:\n log.trace(\"Queue not empty; continue consumption.\")\n self.bot.loop.create_task(self.consume_messages())\n else:\n log.trace(\"Done consuming messages.\")\n self.consuming = False\n\n async def send_header(self, message: Message, destination: TextChannel):\n \"\"\"\n Sends a log message header to the given channel.\n\n A header is only sent if the user or channel are different than the previous, or if the configured message\n limit for a single header has been exceeded.\n\n :param message: the first message in the queue\n :param destination: the channel in which to send the header\n \"\"\"\n\n last_user, last_channel, msg_count = self.last_log\n limit = BigBrotherConfig.header_message_limit\n\n # Send header if user/channel are different or if message limit exceeded.\n if message.author.id != last_user or message.channel.id != last_channel or msg_count > limit:\n self.last_log = [message.author.id, message.channel.id, 0]\n\n embed = Embed(description=f\"{message.author.mention} in [#{message.channel.name}]({message.jump_url})\")\n embed.set_author(name=message.author.nick or message.author.name, icon_url=message.author.avatar_url)\n await destination.send(embed=embed)\n\n 
@staticmethod\n async def log_message(message: Message, destination: TextChannel):\n \"\"\"\n Logs a watched user's message in the given channel.\n\n Attachments are also sent. All non-image or non-video URLs are put in inline code blocks to prevent preview\n embeds from being automatically generated.\n\n :param message: the message to log\n :param destination: the channel in which to log the message\n \"\"\"\n\n content = message.clean_content\n if content:\n # Put all non-media URLs in inline code blocks.\n media_urls = {embed.url for embed in message.embeds if embed.type in (\"image\", \"video\")}\n for url in URL_RE.findall(content):\n if url not in media_urls:\n content = content.replace(url, f\"`{url}`\")\n\n await destination.send(content)\n\n await messages.send_attachments(message, destination)\n\n @group(name='bigbrother', aliases=('bb',), invoke_without_command=True)\n @with_role(Roles.owner, Roles.admin, Roles.moderator)\n async def bigbrother_group(self, ctx: Context):\n \"\"\"Monitor users, NSA-style.\"\"\"\n\n await ctx.invoke(self.bot.get_command(\"help\"), \"bigbrother\")\n\n @bigbrother_group.command(name='watched', aliases=('all',))\n @with_role(Roles.owner, Roles.admin, Roles.moderator)\n async def watched_command(self, ctx: Context, from_cache: bool = True):\n \"\"\"\n Shows all users that are currently monitored and in which channel.\n By default, the users are returned from the cache.\n If this is not desired, `from_cache` can be given as a falsy value, e.g. e.g. 'no'.\n \"\"\"\n\n if from_cache:\n lines = tuple(\n f\"\u2022 <@{user_id}> in <#{self.watched_users[user_id].id}>\"\n for user_id in self.watched_users\n )\n await LinePaginator.paginate(\n lines or (\"There's nothing here yet.\",),\n ctx,\n Embed(title=\"Watched users (cached)\", color=Color.blue()),\n empty=False\n )\n\n else:\n async with self.bot.http_session.get(URLs.site_bigbrother_api, headers=self.HEADERS) as response:\n if response.status == 200:\n data = await response.json()\n self.update_cache(data)\n lines = tuple(f\"\u2022 <@{entry['user_id']}> in <#{entry['channel_id']}>\" for entry in data)\n\n await LinePaginator.paginate(\n lines or (\"There's nothing here yet.\",),\n ctx,\n Embed(title=\"Watched users\", color=Color.blue()),\n empty=False\n )\n\n else:\n await ctx.send(f\":x: got non-200 response from the API\")\n\n @bigbrother_group.command(name='watch', aliases=('w',))\n @with_role(Roles.owner, Roles.admin, Roles.moderator)\n async def watch_command(self, ctx: Context, user: User, *, reason: str = None):\n \"\"\"\n Relay messages sent by the given `user` to the `#big-brother-logs` channel\n\n If a `reason` is specified, a note is added for `user`\n \"\"\"\n\n channel_id = Channels.big_brother_logs\n\n post_data = {\n 'user_id': str(user.id),\n 'channel_id': str(channel_id)\n }\n\n async with self.bot.http_session.post(\n URLs.site_bigbrother_api,\n headers=self.HEADERS,\n json=post_data\n ) as response:\n if response.status == 204:\n await ctx.send(f\":ok_hand: will now relay messages sent by {user} in <#{channel_id}>\")\n\n channel = self.bot.get_channel(channel_id)\n if channel is None:\n log.error(\n f\"could not update internal cache, failed to find a channel with ID {channel_id}\"\n )\n else:\n self.watched_users[user.id] = channel\n\n else:\n data = await response.json()\n reason = data.get('error_message', \"no message provided\")\n await ctx.send(f\":x: the API returned an error: {reason}\")\n\n # Add a note (shadow warning) if a reason is specified\n if reason:\n reason = \"bb 
watch: \" + reason # Prepend for situational awareness\n await post_infraction(ctx, user, type=\"warning\", reason=reason, hidden=True)\n\n @bigbrother_group.command(name='unwatch', aliases=('uw',))\n @with_role(Roles.owner, Roles.admin, Roles.moderator)\n async def unwatch_command(self, ctx: Context, user: User):\n \"\"\"Stop relaying messages by the given `user`.\"\"\"\n\n url = f\"{URLs.site_bigbrother_api}?user_id={user.id}\"\n async with self.bot.http_session.delete(url, headers=self.HEADERS) as response:\n if response.status == 204:\n await ctx.send(f\":ok_hand: will no longer relay messages sent by {user}\")\n\n if user.id in self.watched_users:\n del self.watched_users[user.id]\n if user.id in self.channel_queues:\n del self.channel_queues[user.id]\n else:\n log.warning(f\"user {user.id} was unwatched but was not found in the cache\")\n\n else:\n data = await response.json()\n reason = data.get('error_message', \"no message provided\")\n await ctx.send(f\":x: the API returned an error: {reason}\")\n\n\ndef setup(bot: Bot):\n bot.add_cog(BigBrother(bot))\n log.info(\"Cog loaded: BigBrother\")\n", "path": "bot/cogs/bigbrother.py"}]} | 3,859 | 424 |
gh_patches_debug_38915 | rasdani/github-patches | git_diff | lisa-lab__pylearn2-1512 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
speed up NanGuardMode on GPU and move to Theano.
This can be done as in gh-1054: do the reduction on the GPU, so that much less data has to be transferred.
The CudaNdarray object does not support many reductions, but we can compile a Theano function that takes a GPU object, does the reduction, and returns the result on the CPU so that we can inspect it.
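A minimal sketch of that idea is below; in practice the input would be declared as a GPU variable so the full array is never copied back, and the names, input rank, and dtype here are assumptions for illustration rather than the eventual API:
```python
import theano
import theano.tensor as T

# Build the reductions into a compiled graph once; only the scalar results
# need to be inspected on the host afterwards.
x = T.vector()
compute_stats = theano.function(
    [x],
    [T.isnan(x).any(), T.isinf(x).any(), T.abs_(x).max()],
)

def looks_bad(flat_value, big=1e10):
    has_nan, has_inf, abs_max = compute_stats(flat_value)
    return bool(has_nan) or bool(has_inf) or float(abs_max) > big
```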
</issue>
<code>
[start of pylearn2/devtools/nan_guard.py]
1 """
2 Functionality for detecting NaNs in a Theano graph.
3 """
4 __authors__ = "Ian Goodfellow"
5 __copyright__ = "Copyright 2010-2012, Universite de Montreal"
6 __credits__ = ["Ian Goodfellow"]
7 __license__ = "3-clause BSD"
8 __maintainer__ = "LISA Lab"
9 __email__ = "pylearn-dev@googlegroups"
10
11 import logging
12 from theano.compile import Mode
13 import theano
14 import numpy as np
15 from pylearn2.models.dbm import flatten
16 from pylearn2.utils import contains_nan, contains_inf
17
18
19 logger = logging.getLogger(__name__)
20
21
22 class NanGuardMode(Mode):
23 """
24 A Theano compilation Mode that makes the compiled function automatically
25 detect NaNs and Infs and detect an error if they occur.
26
27 Parameters
28 ----------
29 nan_is_error : bool
30 If True, raise an error anytime a NaN is encountered
31 inf_is_error: bool
32 If True, raise an error anytime an Inf is encountered. Note that some
33 pylearn2 modules currently use np.inf as a default value (e.g.
34 mlp.max_pool) and these will cause an error if inf_is_error is True.
35 big_is_error: bool
36 If True, raise an error when a value greater than 1e10 is encountered.
37 """
38 def __init__(self, nan_is_error, inf_is_error, big_is_error=True):
39 def do_check_on(var, nd, f, is_input):
40 """
41 Checks `var` for NaNs / Infs. If detected, raises an exception
42 and / or prints information about `nd`, `f`, and `is_input` to
43 help the user determine the cause of the invalid values.
44
45 Parameters
46 ----------
47 var : numpy.ndarray
48 The value to be checked.
49 nd : theano.gof.Apply
50 The Apply node being executed
51 f : callable
52 The thunk for the apply node
53 is_input : bool
54 If True, `var` is an input to `nd`.
55 If False, it is an output.
56 """
57 error = False
58 if nan_is_error:
59 if contains_nan(var):
60 logger.error('NaN detected')
61 error = True
62 if inf_is_error:
63 if contains_inf(var):
64 logger.error('Inf detected')
65 error = True
66 if big_is_error:
67 if np.abs(var).max() > 1e10:
68 logger.error('Big value detected')
69 error = True
70 if error:
71 if is_input:
72 logger.error('In an input')
73 else:
74 logger.error('In an output')
75 logger.error('Inputs: ')
76 for ivar, ival in zip(nd.inputs, f.inputs):
77 logger.error('var')
78 logger.error(ivar)
79 logger.error(theano.printing.min_informative_str(ivar))
80 logger.error('val')
81 logger.error(ival)
82 logger.error('Node:')
83 logger.error(nd)
84 assert False
85
86 def nan_check(i, node, fn):
87 """
88 Runs `fn` while checking its inputs and outputs for NaNs / Infs
89
90 Parameters
91 ----------
92 i : currently ignored (TODO: determine why it is here or remove)
93 node : theano.gof.Apply
94 The Apply node currently being executed
95 fn : callable
96 The thunk to execute for this Apply node
97 """
98 inputs = fn.inputs
99 # TODO: figure out why individual inputs are themselves lists sometimes
100 for x in flatten(inputs):
101 do_check_on(x, node, fn, True)
102 fn()
103 outputs = fn.outputs
104 for j, x in enumerate(flatten(outputs)):
105 do_check_on(x, node, fn, False)
106
107 wrap_linker = theano.gof.WrapLinkerMany([theano.gof.OpWiseCLinker()], [nan_check])
108 super(NanGuardMode, self).__init__(wrap_linker, optimizer=theano.config.optimizer)
109
[end of pylearn2/devtools/nan_guard.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pylearn2/devtools/nan_guard.py b/pylearn2/devtools/nan_guard.py
--- a/pylearn2/devtools/nan_guard.py
+++ b/pylearn2/devtools/nan_guard.py
@@ -11,6 +11,8 @@
import logging
from theano.compile import Mode
import theano
+import theano.tensor as T
+import theano.sandbox.cuda as cuda
import numpy as np
from pylearn2.models.dbm import flatten
from pylearn2.utils import contains_nan, contains_inf
@@ -36,6 +38,23 @@
If True, raise an error when a value greater than 1e10 is encountered.
"""
def __init__(self, nan_is_error, inf_is_error, big_is_error=True):
+ if cuda.cuda_available:
+ self.guard_input = cuda.fvector('nan_guard')
+ if nan_is_error or inf_is_error:
+ self.gpumin = theano.function(
+ [self.guard_input], T.min(self.guard_input),
+ mode='FAST_RUN'
+ )
+ if inf_is_error:
+ self.gpumax = theano.function(
+ [self.guard_input], T.max(self.guard_input),
+ mode='FAST_RUN'
+ )
+ if big_is_error:
+ self.gpuabsmax = theano.function(
+ [self.guard_input], T.max(T.abs_(self.guard_input)),
+ mode='FAST_RUN'
+ )
def do_check_on(var, nd, f, is_input):
"""
Checks `var` for NaNs / Infs. If detected, raises an exception
@@ -56,15 +75,31 @@
"""
error = False
if nan_is_error:
- if contains_nan(var):
+ err = False
+ if cuda.cuda_available and isinstance(var, cuda.CudaNdarray):
+ err = np.isnan(self.gpumin(var.reshape(var.size)))
+ else:
+ err = contains_nan(var)
+ if err:
logger.error('NaN detected')
error = True
if inf_is_error:
- if contains_inf(var):
+ err = False
+ if cuda.cuda_available and isinstance(var, cuda.CudaNdarray):
+ err = (np.isinf(self.gpumin(var.reshape(var.size))) or \
+ np.isinf(self.gpumax(var.reshape(var.size))))
+ else:
+ err = contains_inf(var)
+ if err:
logger.error('Inf detected')
error = True
if big_is_error:
- if np.abs(var).max() > 1e10:
+ err = False
+ if cuda.cuda_available and isinstance(var, cuda.CudaNdarray):
+ err = (self.gpuabsmax(var.reshape(var.size)) > 1e10)
+ else:
+ err = (np.abs(var).max() > 1e10)
+ if err:
logger.error('Big value detected')
error = True
if error:
| {"golden_diff": "diff --git a/pylearn2/devtools/nan_guard.py b/pylearn2/devtools/nan_guard.py\n--- a/pylearn2/devtools/nan_guard.py\n+++ b/pylearn2/devtools/nan_guard.py\n@@ -11,6 +11,8 @@\n import logging\n from theano.compile import Mode\n import theano\n+import theano.tensor as T\n+import theano.sandbox.cuda as cuda\n import numpy as np\n from pylearn2.models.dbm import flatten\n from pylearn2.utils import contains_nan, contains_inf\n@@ -36,6 +38,23 @@\n If True, raise an error when a value greater than 1e10 is encountered.\n \"\"\"\n def __init__(self, nan_is_error, inf_is_error, big_is_error=True):\n+ if cuda.cuda_available:\n+ self.guard_input = cuda.fvector('nan_guard')\n+ if nan_is_error or inf_is_error:\n+ self.gpumin = theano.function(\n+ [self.guard_input], T.min(self.guard_input),\n+ mode='FAST_RUN'\n+ )\n+ if inf_is_error:\n+ self.gpumax = theano.function(\n+ [self.guard_input], T.max(self.guard_input),\n+ mode='FAST_RUN'\n+ )\n+ if big_is_error:\n+ self.gpuabsmax = theano.function(\n+ [self.guard_input], T.max(T.abs_(self.guard_input)),\n+ mode='FAST_RUN'\n+ )\n def do_check_on(var, nd, f, is_input):\n \"\"\"\n Checks `var` for NaNs / Infs. If detected, raises an exception\n@@ -56,15 +75,31 @@\n \"\"\"\n error = False\n if nan_is_error:\n- if contains_nan(var):\n+ err = False\n+ if cuda.cuda_available and isinstance(var, cuda.CudaNdarray):\n+ err = np.isnan(self.gpumin(var.reshape(var.size)))\n+ else:\n+ err = contains_nan(var)\n+ if err:\n logger.error('NaN detected')\n error = True\n if inf_is_error:\n- if contains_inf(var):\n+ err = False\n+ if cuda.cuda_available and isinstance(var, cuda.CudaNdarray):\n+ err = (np.isinf(self.gpumin(var.reshape(var.size))) or \\\n+ np.isinf(self.gpumax(var.reshape(var.size))))\n+ else:\n+ err = contains_inf(var)\n+ if err:\n logger.error('Inf detected')\n error = True\n if big_is_error:\n- if np.abs(var).max() > 1e10:\n+ err = False\n+ if cuda.cuda_available and isinstance(var, cuda.CudaNdarray):\n+ err = (self.gpuabsmax(var.reshape(var.size)) > 1e10)\n+ else:\n+ err = (np.abs(var).max() > 1e10)\n+ if err:\n logger.error('Big value detected')\n error = True\n if error:\n", "issue": "speed up NanGuardMode on GPU and move to Theano.\nThis can be done as in gh-1054. Do the reduction on the GPU, then this will transfer much less data.\n\nThe CudaNdarray object do not support many reduction, but we can compile a Theano function that take a gpu object, do the reduction and return the result on the CPU to inspect it.\n\n", "before_files": [{"content": "\"\"\"\nFunctionality for detecting NaNs in a Theano graph.\n\"\"\"\n__authors__ = \"Ian Goodfellow\"\n__copyright__ = \"Copyright 2010-2012, Universite de Montreal\"\n__credits__ = [\"Ian Goodfellow\"]\n__license__ = \"3-clause BSD\"\n__maintainer__ = \"LISA Lab\"\n__email__ = \"pylearn-dev@googlegroups\"\n\nimport logging\nfrom theano.compile import Mode\nimport theano\nimport numpy as np\nfrom pylearn2.models.dbm import flatten\nfrom pylearn2.utils import contains_nan, contains_inf\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass NanGuardMode(Mode):\n \"\"\"\n A Theano compilation Mode that makes the compiled function automatically\n detect NaNs and Infs and detect an error if they occur.\n\n Parameters\n ----------\n nan_is_error : bool\n If True, raise an error anytime a NaN is encountered\n inf_is_error: bool\n If True, raise an error anytime an Inf is encountered. 
Note that some\n pylearn2 modules currently use np.inf as a default value (e.g.\n mlp.max_pool) and these will cause an error if inf_is_error is True.\n big_is_error: bool\n If True, raise an error when a value greater than 1e10 is encountered.\n \"\"\"\n def __init__(self, nan_is_error, inf_is_error, big_is_error=True):\n def do_check_on(var, nd, f, is_input):\n \"\"\"\n Checks `var` for NaNs / Infs. If detected, raises an exception\n and / or prints information about `nd`, `f`, and `is_input` to\n help the user determine the cause of the invalid values.\n\n Parameters\n ----------\n var : numpy.ndarray\n The value to be checked.\n nd : theano.gof.Apply\n The Apply node being executed\n f : callable\n The thunk for the apply node\n is_input : bool\n If True, `var` is an input to `nd`.\n If False, it is an output.\n \"\"\"\n error = False\n if nan_is_error:\n if contains_nan(var):\n logger.error('NaN detected')\n error = True\n if inf_is_error:\n if contains_inf(var):\n logger.error('Inf detected')\n error = True\n if big_is_error:\n if np.abs(var).max() > 1e10:\n logger.error('Big value detected')\n error = True\n if error:\n if is_input:\n logger.error('In an input')\n else:\n logger.error('In an output')\n logger.error('Inputs: ')\n for ivar, ival in zip(nd.inputs, f.inputs):\n logger.error('var')\n logger.error(ivar)\n logger.error(theano.printing.min_informative_str(ivar))\n logger.error('val')\n logger.error(ival)\n logger.error('Node:')\n logger.error(nd)\n assert False\n\n def nan_check(i, node, fn):\n \"\"\"\n Runs `fn` while checking its inputs and outputs for NaNs / Infs\n\n Parameters\n ----------\n i : currently ignored (TODO: determine why it is here or remove)\n node : theano.gof.Apply\n The Apply node currently being executed\n fn : callable\n The thunk to execute for this Apply node\n \"\"\"\n inputs = fn.inputs\n # TODO: figure out why individual inputs are themselves lists sometimes\n for x in flatten(inputs):\n do_check_on(x, node, fn, True)\n fn()\n outputs = fn.outputs\n for j, x in enumerate(flatten(outputs)):\n do_check_on(x, node, fn, False)\n\n wrap_linker = theano.gof.WrapLinkerMany([theano.gof.OpWiseCLinker()], [nan_check])\n super(NanGuardMode, self).__init__(wrap_linker, optimizer=theano.config.optimizer)\n", "path": "pylearn2/devtools/nan_guard.py"}]} | 1,705 | 666 |
gh_patches_debug_3494 | rasdani/github-patches | git_diff | wagtail__wagtail-10939 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Getting a "cannot pickle" error in Wagtail 5.1 when when using a custom image model
<!--
Found a bug? Please fill out the sections below. 👍
-->
### Issue Summary
I recently upgraded four small sites to Wagtail 5.1. None of these sites have ever used caching, and each site uses a custom image model. On all but one site I started getting the error: ` cannot pickle '_io.BufferedReader' object` every place an `{% image ... %}` tag is used. All of these sites have been working fine for several years. (As an aside, I see that in `Image.get_rendition()`, a `try...except` was removed in 5.1 that would have probably been masking any caching-related errors that might have occurred before 5.1.) The pickling error occurs both when caching is absent and when caching (Redis) is used. The `'_io.BufferedReader' object` error occurs in Django's `locmem.py` caching when no cache is specified in the settings, and a different pickling error message occurs when Redis is used, but in both cases pickling is the problem.
To test from the command line, I inserted a print statement in `locmem.py` to print out the pickled value. I then looped through the images calling `get_rendition()`. All pickled values printed correctly. To test a specific image from the command line, I uploaded a new image to the `logo_admin` field described below and did:
```
r=CustomRendition.objects.get(pk=63)
r.image.get_rendition(r.image_spec)
```
The pickled value was printed correctly. However, when this exact same image/rendition was called from an `{% image ... %}` tag in the admin as described below, the pickling error happens. The `CustomImage(AbstractImage)/CustomRendition(AbstractRendition)` definitions are the same in each of these projects and are set up [as described in the docs](https://docs.wagtail.org/en/latest/advanced_topics/images/custom_image_model.html#custom-image-models).
The easiest way to test is from the admin as described above and in **Steps to Reproduce** below. I also tried:
- deleting all existing renditions
- uploading a new image to the `logo_admin` field shown below. Again, when I comment out the `{% image settings.main.SiteSettings.logo_admin width-150 %}` line shown below, the admin comes up fine. When I uncomment it, the error happens again. The error happens with both `png` and `jpg` images.
### Main Question
How could the exact same rendition be pickled fine when called from the admin but encounter a pickling error when called from an `{% image ... %}` tag??
### Steps to Reproduce
Use a `CustomImage` model and set up the following `SiteSettings` model:
```
@register_setting
class SiteSettings(BaseSiteSetting):
logo_admin = models.OneToOneField(settings.WAGTAILIMAGES_IMAGE_MODEL, null=True, blank=True, on_delete=models.SET_NULL, related_name='+')
```
Render the following from the `base.html` template in `wagtailadmin`:
```
{% extends "wagtailadmin/base.html" %}
{% load wagtailimages_tags %}
{% block branding_logo %}
{% image settings.main.SiteSettings.logo_admin width-150 %}
{% endblock %}
```
- I have NOT confirmed that this issue can be reproduced as described on a fresh Wagtail project.
### Technical details
- Python version: 3.8
- Django version: 4.2.5
- Wagtail version: 5.1
</issue>
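Given the fix that was eventually adopted (see the diff later in this record), the likely culprit is that `BaseSiteSetting.for_request()` attaches the live request to the settings instance, and a request can hold open file handles such as `_io.BufferedReader`. If that instance is reachable from whatever Wagtail 5.1 pickles into the renditions cache, the pickle fails. A rough, illustrative way to see it outside the template layer (`request` stands in for any live request, e.g. one carrying an uploaded file):

```python
import pickle

s = SiteSettings.for_request(request)   # attaches s._request = request
print("_request" in s.__dict__)         # True
pickle.dumps(s)                         # can raise: cannot pickle '_io.BufferedReader' object

# The shape of the fix: keep the per-request reference out of the pickled state,
# e.g. by popping "_request" in BaseSiteSetting.__getstate__().
```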
<code>
[start of wagtail/contrib/settings/models.py]
1 from django.db import models
2 from django.utils.functional import cached_property
3 from django.utils.translation import gettext as _
4
5 from wagtail.coreutils import InvokeViaAttributeShortcut
6 from wagtail.models import Site
7
8 from .registry import register_setting
9
10 __all__ = [
11 "BaseGenericSetting",
12 "BaseSiteSetting",
13 "register_setting",
14 ]
15
16
17 class AbstractSetting(models.Model):
18 """
19 The abstract base model for settings. Subclasses must be registered using
20 :func:`~wagtail.contrib.settings.registry.register_setting`
21 """
22
23 class Meta:
24 abstract = True
25
26 # Override to fetch ForeignKey values in the same query when
27 # retrieving settings (e.g. via `for_request()`)
28 select_related = None
29
30 @classmethod
31 def base_queryset(cls):
32 """
33 Returns a queryset of objects of this type to use as a base.
34
35 You can use the `select_related` attribute on your class to
36 specify a list of foreign key field names, which the method
37 will attempt to select additional related-object data for
38 when the query is executed.
39
40 If your needs are more complex than this, you can override
41 this method on your custom class.
42 """
43 queryset = cls.objects.all()
44 if cls.select_related is not None:
45 queryset = queryset.select_related(*cls.select_related)
46 return queryset
47
48 @classmethod
49 def get_cache_attr_name(cls):
50 """
51 Returns the name of the attribute that should be used to store
52 a reference to the fetched/created object on a request.
53 """
54 return f"_{cls._meta.app_label}.{cls._meta.model_name}".lower()
55
56 def __init__(self, *args, **kwargs):
57 super().__init__(*args, **kwargs)
58 # Per-instance page URL cache
59 self._page_url_cache = {}
60
61 @cached_property
62 def page_url(self):
63 # Allows get_page_url() to be invoked using
64 # `obj.page_url.foreign_key_name` syntax
65 return InvokeViaAttributeShortcut(self, "get_page_url")
66
67 def get_page_url(self, attribute_name, request=None):
68 """
69 Returns the URL of a page referenced by a foreign key
70 (or other attribute) matching the name ``attribute_name``.
71 If the field value is null, or links to something other
72 than a ``Page`` object, an empty string is returned.
73 The result is also cached per-object to facilitate
74 fast repeat access.
75
76 Raises an ``AttributeError`` if the object has no such
77 field or attribute.
78 """
79 if attribute_name in self._page_url_cache:
80 return self._page_url_cache[attribute_name]
81
82 if not hasattr(self, attribute_name):
83 raise AttributeError(
84 "'{}' object has no attribute '{}'".format(
85 self.__class__.__name__, attribute_name
86 )
87 )
88
89 page = getattr(self, attribute_name)
90
91 if hasattr(page, "specific"):
92 url = page.specific.get_url(getattr(self, "_request", None))
93 else:
94 url = ""
95
96 self._page_url_cache[attribute_name] = url
97 return url
98
99 def __getstate__(self):
100 # Ignore 'page_url' when pickling
101 state = super().__getstate__()
102 state.pop("page_url", None)
103 return state
104
105
106 class BaseSiteSetting(AbstractSetting):
107 site = models.OneToOneField(
108 Site,
109 unique=True,
110 db_index=True,
111 editable=False,
112 on_delete=models.CASCADE,
113 )
114
115 class Meta:
116 abstract = True
117
118 @classmethod
119 def for_request(cls, request):
120 """
121 Get or create an instance of this model for the request,
122 and cache the result on the request for faster repeat access.
123 """
124 attr_name = cls.get_cache_attr_name()
125 if hasattr(request, attr_name):
126 return getattr(request, attr_name)
127 site = Site.find_for_request(request)
128 site_settings = cls.for_site(site)
129 # to allow more efficient page url generation
130 site_settings._request = request
131 setattr(request, attr_name, site_settings)
132 return site_settings
133
134 @classmethod
135 def for_site(cls, site):
136 """
137 Get or create an instance of this setting for the site.
138 """
139 queryset = cls.base_queryset()
140 instance, created = queryset.get_or_create(site=site)
141 return instance
142
143 def __str__(self):
144 return _("%(site_setting)s for %(site)s") % {
145 "site_setting": self._meta.verbose_name,
146 "site": self.site,
147 }
148
149
150 class BaseGenericSetting(AbstractSetting):
151 """
152 Generic settings are singleton models - only one instance of each model
153 can be created.
154 """
155
156 class Meta:
157 abstract = True
158
159 @classmethod
160 def _get_or_create(cls):
161 """
162 Internal convenience method to get or create the first instance.
163
164 We cannot hardcode `pk=1`, for example, as not all database backends
165 use sequential IDs (e.g. Postgres).
166 """
167
168 first_obj = cls.base_queryset().first()
169 if first_obj is None:
170 return cls.objects.create()
171 return first_obj
172
173 @classmethod
174 def load(cls, request_or_site=None):
175 """
176 Get or create an instance of this model. There is only ever one
177 instance of models inheriting from `AbstractSetting` so we can
178 use `pk=1`.
179
180 If `request_or_site` is present and is a request object, then we cache
181 the result on the request for faster repeat access.
182 """
183
184 # We can only cache on the request, so if there is no request then
185 # we know there's nothing in the cache.
186 if request_or_site is None or isinstance(request_or_site, Site):
187 return cls._get_or_create()
188
189 # Check if we already have this in the cache and return it if so.
190 attr_name = cls.get_cache_attr_name()
191 if hasattr(request_or_site, attr_name):
192 return getattr(request_or_site, attr_name)
193
194 obj = cls._get_or_create()
195
196 # Cache for next time.
197 setattr(request_or_site, attr_name, obj)
198
199 return obj
200
201 def __str__(self):
202 return str(self._meta.verbose_name)
203
[end of wagtail/contrib/settings/models.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wagtail/contrib/settings/models.py b/wagtail/contrib/settings/models.py
--- a/wagtail/contrib/settings/models.py
+++ b/wagtail/contrib/settings/models.py
@@ -131,6 +131,12 @@
setattr(request, attr_name, site_settings)
return site_settings
+ def __getstate__(self):
+ # Leave out _request from the pickled state
+ state = super().__getstate__()
+ state.pop("_request", None)
+ return state
+
@classmethod
def for_site(cls, site):
"""
| {"golden_diff": "diff --git a/wagtail/contrib/settings/models.py b/wagtail/contrib/settings/models.py\n--- a/wagtail/contrib/settings/models.py\n+++ b/wagtail/contrib/settings/models.py\n@@ -131,6 +131,12 @@\n setattr(request, attr_name, site_settings)\n return site_settings\n \n+ def __getstate__(self):\n+ # Leave out _request from the pickled state\n+ state = super().__getstate__()\n+ state.pop(\"_request\", None)\n+ return state\n+\n @classmethod\n def for_site(cls, site):\n \"\"\"\n", "issue": "Getting a \"cannot pickle\" error in Wagtail 5.1 when when using a custom image model\n<!--\r\nFound a bug? Please fill out the sections below. \ud83d\udc4d\r\n-->\r\n\r\n### Issue Summary\r\n\r\nI recently upgraded four small sites to Wagtail 5.1. None of these sites have ever used caching, and each site uses a custom image model. On all but one site I started getting the error: ` cannot pickle '_io.BufferedReader' object` every place an `{% image ... %}` tag is used. All of these sites have been working fine for several years. (As an aside, I see that in `Image.get_rendition()`, a `try...except` was removed in 5.1 that would have probably been masking any caching-related errors that might have occurred before 5.1.) The pickling error occurs both when caching is absent and when caching (Redis) is used. The `'_io.BufferedReader' object` error occurs in Django's `locmem.py` caching when no cache is specified in the settings, and a different pickling error message occurs when Redis is used, but in both cases pickling is the problem.\r\n\r\nTo test from the command line, I inserted a print statement in `locmem.py` to print out the pickled value. I then looped through the images calling `get_rendition()`. All pickled values printed correctly. To test a specific image from the command line, I uploaded a new image to the `logo_admin` field described below and did:\r\n```\r\nr=CustomRendition.objects.get(pk=63)\r\nr.image.get_rendition(r.image_spec)\r\n```\r\nThe pickled value was printed correctly. However, when this exact same image/rendition was called from an `{% image ... %}` tag in the admin as described below, the pickling error happens. The `CustomImage(AbstractImage)/CustomRendition(AbstractRendition)` definitions are the same in each of these projects and are set up [as described in the docs](https://docs.wagtail.org/en/latest/advanced_topics/images/custom_image_model.html#custom-image-models).\r\n\r\nThe easiest way to test is from the admin as described above and in **Steps to Reproduce** below. I also tried:\r\n- deleting all existing renditions\r\n- uploading a new image to the `logo_admin` field shown below. Again, when I comment out the `{% image settings.main.SiteSettings.logo_admin width-150 %}` line shown below, the admin comes up fine. When I uncomment it, the error happens again. The error happens with both `png` and `jpg` images.\r\n\r\n### Main Question\r\nHow could the exact same rendition be pickled fine when called from the admin but encounter a pickling error when called from an `{% image ... 
%}` tag??\r\n\r\n### Steps to Reproduce\r\n\r\nUse a `CustomImage` model and set up the following `SiteSettings` model:\r\n\r\n```\r\n@register_setting\r\nclass SiteSettings(BaseSiteSetting):\r\n logo_admin = models.OneToOneField(settings.WAGTAILIMAGES_IMAGE_MODEL, null=True, blank=True, on_delete=models.SET_NULL, related_name='+')\r\n```\r\n\r\nRender the following from the `base.html` template in `wagtailadmin`:\r\n\r\n```\r\n{% extends \"wagtailadmin/base.html\" %}\r\n{% load wagtailimages_tags %}\r\n\r\n{% block branding_logo %}\r\n {% image settings.main.SiteSettings.logo_admin width-150 %}\r\n{% endblock %}\r\n```\r\n\r\n- I have NOT confirmed that this issue can be reproduced as described on a fresh Wagtail project.\r\n\r\n### Technical details\r\n\r\n- Python version: 3.8\r\n- Django version: 4.2.5\r\n- Wagtail version: 5.1\r\n\n", "before_files": [{"content": "from django.db import models\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import gettext as _\n\nfrom wagtail.coreutils import InvokeViaAttributeShortcut\nfrom wagtail.models import Site\n\nfrom .registry import register_setting\n\n__all__ = [\n \"BaseGenericSetting\",\n \"BaseSiteSetting\",\n \"register_setting\",\n]\n\n\nclass AbstractSetting(models.Model):\n \"\"\"\n The abstract base model for settings. Subclasses must be registered using\n :func:`~wagtail.contrib.settings.registry.register_setting`\n \"\"\"\n\n class Meta:\n abstract = True\n\n # Override to fetch ForeignKey values in the same query when\n # retrieving settings (e.g. via `for_request()`)\n select_related = None\n\n @classmethod\n def base_queryset(cls):\n \"\"\"\n Returns a queryset of objects of this type to use as a base.\n\n You can use the `select_related` attribute on your class to\n specify a list of foreign key field names, which the method\n will attempt to select additional related-object data for\n when the query is executed.\n\n If your needs are more complex than this, you can override\n this method on your custom class.\n \"\"\"\n queryset = cls.objects.all()\n if cls.select_related is not None:\n queryset = queryset.select_related(*cls.select_related)\n return queryset\n\n @classmethod\n def get_cache_attr_name(cls):\n \"\"\"\n Returns the name of the attribute that should be used to store\n a reference to the fetched/created object on a request.\n \"\"\"\n return f\"_{cls._meta.app_label}.{cls._meta.model_name}\".lower()\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # Per-instance page URL cache\n self._page_url_cache = {}\n\n @cached_property\n def page_url(self):\n # Allows get_page_url() to be invoked using\n # `obj.page_url.foreign_key_name` syntax\n return InvokeViaAttributeShortcut(self, \"get_page_url\")\n\n def get_page_url(self, attribute_name, request=None):\n \"\"\"\n Returns the URL of a page referenced by a foreign key\n (or other attribute) matching the name ``attribute_name``.\n If the field value is null, or links to something other\n than a ``Page`` object, an empty string is returned.\n The result is also cached per-object to facilitate\n fast repeat access.\n\n Raises an ``AttributeError`` if the object has no such\n field or attribute.\n \"\"\"\n if attribute_name in self._page_url_cache:\n return self._page_url_cache[attribute_name]\n\n if not hasattr(self, attribute_name):\n raise AttributeError(\n \"'{}' object has no attribute '{}'\".format(\n self.__class__.__name__, attribute_name\n )\n )\n\n page = getattr(self, attribute_name)\n\n if 
hasattr(page, \"specific\"):\n url = page.specific.get_url(getattr(self, \"_request\", None))\n else:\n url = \"\"\n\n self._page_url_cache[attribute_name] = url\n return url\n\n def __getstate__(self):\n # Ignore 'page_url' when pickling\n state = super().__getstate__()\n state.pop(\"page_url\", None)\n return state\n\n\nclass BaseSiteSetting(AbstractSetting):\n site = models.OneToOneField(\n Site,\n unique=True,\n db_index=True,\n editable=False,\n on_delete=models.CASCADE,\n )\n\n class Meta:\n abstract = True\n\n @classmethod\n def for_request(cls, request):\n \"\"\"\n Get or create an instance of this model for the request,\n and cache the result on the request for faster repeat access.\n \"\"\"\n attr_name = cls.get_cache_attr_name()\n if hasattr(request, attr_name):\n return getattr(request, attr_name)\n site = Site.find_for_request(request)\n site_settings = cls.for_site(site)\n # to allow more efficient page url generation\n site_settings._request = request\n setattr(request, attr_name, site_settings)\n return site_settings\n\n @classmethod\n def for_site(cls, site):\n \"\"\"\n Get or create an instance of this setting for the site.\n \"\"\"\n queryset = cls.base_queryset()\n instance, created = queryset.get_or_create(site=site)\n return instance\n\n def __str__(self):\n return _(\"%(site_setting)s for %(site)s\") % {\n \"site_setting\": self._meta.verbose_name,\n \"site\": self.site,\n }\n\n\nclass BaseGenericSetting(AbstractSetting):\n \"\"\"\n Generic settings are singleton models - only one instance of each model\n can be created.\n \"\"\"\n\n class Meta:\n abstract = True\n\n @classmethod\n def _get_or_create(cls):\n \"\"\"\n Internal convenience method to get or create the first instance.\n\n We cannot hardcode `pk=1`, for example, as not all database backends\n use sequential IDs (e.g. Postgres).\n \"\"\"\n\n first_obj = cls.base_queryset().first()\n if first_obj is None:\n return cls.objects.create()\n return first_obj\n\n @classmethod\n def load(cls, request_or_site=None):\n \"\"\"\n Get or create an instance of this model. There is only ever one\n instance of models inheriting from `AbstractSetting` so we can\n use `pk=1`.\n\n If `request_or_site` is present and is a request object, then we cache\n the result on the request for faster repeat access.\n \"\"\"\n\n # We can only cache on the request, so if there is no request then\n # we know there's nothing in the cache.\n if request_or_site is None or isinstance(request_or_site, Site):\n return cls._get_or_create()\n\n # Check if we already have this in the cache and return it if so.\n attr_name = cls.get_cache_attr_name()\n if hasattr(request_or_site, attr_name):\n return getattr(request_or_site, attr_name)\n\n obj = cls._get_or_create()\n\n # Cache for next time.\n setattr(request_or_site, attr_name, obj)\n\n return obj\n\n def __str__(self):\n return str(self._meta.verbose_name)\n", "path": "wagtail/contrib/settings/models.py"}]} | 3,198 | 134 |
gh_patches_debug_40127 | rasdani/github-patches | git_diff | mampfes__hacs_waste_collection_schedule-1884 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug]: Barnsley MBC. Fails when running with collection date as today
### I Have A Problem With:
A specific source
### What's Your Problem
The Barnsley Council source does not work when the bin collection date is 'today'.
The source attempts to parse the date string and then fails.
It looks like adding a check for whether the date is 'today' could resolve this:
```
if heading[0].text == "Today":
bin_date = datetime.today().date()
else:
bin_date = datetime.strptime(heading[0].text, "%A, %B %d, %Y").date()
```
### Source (if relevant)
barnsley_gov_uk
### Logs
```Shell
fetch failed for source Barnsley Metropolitan Borough Council:
Traceback (most recent call last):
  File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py", line 134, in fetch
    entries = self._source.fetch()
              ^^^^^^^^^^^^^^^^^^^^
  File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/source/barnsley_gov_uk.py", line 83, in fetch
    bin_date = datetime.strptime(heading[0].text, "%A, %B %d, %Y").date()
               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/usr/local/lib/python3.12/_strptime.py", line 554, in _strptime_datetime
    tt, fraction, gmtoff_fraction = _strptime(data_string, format)
                                    ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/usr/local/lib/python3.12/_strptime.py", line 333, in _strptime
    raise ValueError("time data %r does not match format %r" %
ValueError: time data 'Today' does not match format '%A, %B %d, %Y'
```
### Relevant Configuration
```YAML
waste_collection_schedule:
sources:
- name: barnsley_gov_uk
args:
postcode: S70 3QU
uprn: '100050607581'
calendar_title: Waste Collection
day_switch_time: '12:00'
```
### Checklist Source Error
- [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)
- [X] Checked that the website of your service provider is still working
- [X] Tested my attributes on the service provider website (if possible)
- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on "Redownload" and choose master as version)
### Checklist Sensor Error
- [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)
### Required
- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.
- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.
</issue>
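The direction the issue suggests is essentially what the accepted patch at the end of this record does: funnel every date string through one helper that special-cases 'Today' before falling back to `strptime`. A sketch:

```python
from datetime import date, datetime

def parse_date(d: str) -> date:
    if d.strip().lower() == "today":
        return datetime.now().date()
    return datetime.strptime(d, "%A, %B %d, %Y").date()

parse_date("Today")                    # -> today's date
parse_date("Friday, March 15, 2024")   # -> date(2024, 3, 15)
```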
<code>
[start of custom_components/waste_collection_schedule/waste_collection_schedule/source/barnsley_gov_uk.py]
1 # Credit where it's due:
2 # This is predominantly a refactoring of the Bristol City Council script from the UKBinCollectionData repo
3 # https://github.com/robbrad/UKBinCollectionData
4
5
6 from datetime import datetime
7
8 import requests
9 from bs4 import BeautifulSoup
10 from waste_collection_schedule import Collection # type: ignore[attr-defined]
11
12 TITLE = "Barnsley Metropolitan Borough Council"
13 DESCRIPTION = "Source for Barnsley Metropolitan Borough Council."
14 URL = "https://barnsley.gov.uk"
15 TEST_CASES = {
16 "S71 1EE 100050671689": {"postcode": "S71 1EE", "uprn": 100050671689},
17 "S75 1QF 10032783992": {"postcode": "S75 1QF", "uprn": "10032783992"},
18 }
19
20
21 ICON_MAP = {
22 "grey": "mdi:trash-can",
23 "green": "mdi:leaf",
24 "blue": "mdi:package-variant",
25 "brown": "mdi:recycle",
26 }
27
28
29 API_URL = "https://waste.barnsley.gov.uk/ViewCollection/SelectAddress"
30
31
32 class Source:
33 def __init__(self, postcode: str, uprn: str | int):
34 self._postcode: str = postcode
35 self._uprn: str | int = uprn
36
37 def fetch(self):
38 entries = []
39
40 # Pass in form data and make the POST request
41 headers = {
42 "authority": "waste.barnsley.gov.uk",
43 "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
44 "accept-language": "en-GB,en;q=0.9",
45 "cache-control": "no-cache",
46 "content-type": "application/x-www-form-urlencoded",
47 "origin": "https://waste.barnsley.gov.uk",
48 "pragma": "no-cache",
49 "referer": "https://waste.barnsley.gov.uk/ViewCollection/SelectAddress",
50 "sec-ch-ua": '"Chromium";v="118", "Opera GX";v="104", "Not=A?Brand";v="99"',
51 "sec-ch-ua-mobile": "?0",
52 "sec-ch-ua-platform": '"Windows"',
53 "sec-fetch-dest": "document",
54 "sec-fetch-mode": "navigate",
55 "sec-fetch-site": "same-origin",
56 "sec-fetch-user": "?1",
57 "upgrade-insecure-requests": "1",
58 "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.5993.118 Safari/537.36",
59 }
60 form_data = {
61 "personInfo.person1.HouseNumberOrName": "",
62 "personInfo.person1.Postcode": f"{self._postcode}",
63 "personInfo.person1.UPRN": f"{self._uprn}",
64 "person1_SelectAddress": "Select address",
65 }
66 response = requests.post(
67 "https://waste.barnsley.gov.uk/ViewCollection/SelectAddress",
68 headers=headers,
69 data=form_data,
70 )
71 soup = BeautifulSoup(response.text, features="html.parser")
72 soup.prettify()
73
74 if response.status_code != 200:
75 raise ConnectionRefusedError(
76 "Error getting results from website! Please open an issue on GitHub!"
77 )
78
79 # Parse the response, getting the top box first and then tabled collections after
80 results = soup.find("div", {"class": "panel"}).find_all("fieldset")[0:2]
81 heading = results[0].find_all("p")[1:3]
82 bin_text = heading[1].text.strip() + " bin"
83 bin_date = datetime.strptime(heading[0].text, "%A, %B %d, %Y").date()
84 entries.append(
85 Collection(
86 t=bin_text,
87 date=bin_date,
88 icon=ICON_MAP.get(bin_text.split(" ")[0].lower()),
89 )
90 )
91
92 results_table = [row for row in results[1].find_all("tbody")[0] if row != "\n"]
93 for row in results_table:
94 text_list = [item.text.strip() for item in row.contents if item != "\n"]
95 bin_text = text_list[1] + " bin"
96 bin_date = datetime.strptime(text_list[0], "%A, %B %d, %Y").date()
97 entries.append(
98 Collection(
99 t=bin_text,
100 date=bin_date,
101 icon=ICON_MAP.get(bin_text.split(" ")[0].lower()),
102 )
103 )
104
105 return entries
106
[end of custom_components/waste_collection_schedule/waste_collection_schedule/source/barnsley_gov_uk.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/barnsley_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/barnsley_gov_uk.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/barnsley_gov_uk.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/barnsley_gov_uk.py
@@ -3,7 +3,7 @@
# https://github.com/robbrad/UKBinCollectionData
-from datetime import datetime
+from datetime import date, datetime
import requests
from bs4 import BeautifulSoup
@@ -15,6 +15,7 @@
TEST_CASES = {
"S71 1EE 100050671689": {"postcode": "S71 1EE", "uprn": 100050671689},
"S75 1QF 10032783992": {"postcode": "S75 1QF", "uprn": "10032783992"},
+ "test": {"postcode": "S70 3QU", "uprn": 100050607581},
}
@@ -29,6 +30,12 @@
API_URL = "https://waste.barnsley.gov.uk/ViewCollection/SelectAddress"
+def parse_date(d: str) -> date:
+ if d.lower() == "today":
+ return datetime.now().date()
+ return datetime.strptime(d, "%A, %B %d, %Y").date()
+
+
class Source:
def __init__(self, postcode: str, uprn: str | int):
self._postcode: str = postcode
@@ -79,21 +86,10 @@
# Parse the response, getting the top box first and then tabled collections after
results = soup.find("div", {"class": "panel"}).find_all("fieldset")[0:2]
heading = results[0].find_all("p")[1:3]
- bin_text = heading[1].text.strip() + " bin"
- bin_date = datetime.strptime(heading[0].text, "%A, %B %d, %Y").date()
- entries.append(
- Collection(
- t=bin_text,
- date=bin_date,
- icon=ICON_MAP.get(bin_text.split(" ")[0].lower()),
- )
- )
- results_table = [row for row in results[1].find_all("tbody")[0] if row != "\n"]
- for row in results_table:
- text_list = [item.text.strip() for item in row.contents if item != "\n"]
- bin_text = text_list[1] + " bin"
- bin_date = datetime.strptime(text_list[0], "%A, %B %d, %Y").date()
+ for bin in heading[1].text.strip().split(", "):
+ bin_text = bin + " bin"
+ bin_date = parse_date(heading[0].text)
entries.append(
Collection(
t=bin_text,
@@ -102,4 +98,18 @@
)
)
+ results_table = [row for row in results[1].find_all("tbody")[0] if row != "\n"]
+ for row in results_table:
+ text_list = [item.text.strip() for item in row.contents if item != "\n"]
+ for bin in text_list[1].split(", "):
+ bin_text = bin + " bin"
+ bin_date = parse_date(text_list[0])
+ entries.append(
+ Collection(
+ t=bin_text,
+ date=bin_date,
+ icon=ICON_MAP.get(bin_text.split(" ")[0].lower()),
+ )
+ )
+
return entries
| {"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/barnsley_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/barnsley_gov_uk.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/barnsley_gov_uk.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/barnsley_gov_uk.py\n@@ -3,7 +3,7 @@\n # https://github.com/robbrad/UKBinCollectionData\n \n \n-from datetime import datetime\n+from datetime import date, datetime\n \n import requests\n from bs4 import BeautifulSoup\n@@ -15,6 +15,7 @@\n TEST_CASES = {\n \"S71 1EE 100050671689\": {\"postcode\": \"S71 1EE\", \"uprn\": 100050671689},\n \"S75 1QF 10032783992\": {\"postcode\": \"S75 1QF\", \"uprn\": \"10032783992\"},\n+ \"test\": {\"postcode\": \"S70 3QU\", \"uprn\": 100050607581},\n }\n \n \n@@ -29,6 +30,12 @@\n API_URL = \"https://waste.barnsley.gov.uk/ViewCollection/SelectAddress\"\n \n \n+def parse_date(d: str) -> date:\n+ if d.lower() == \"today\":\n+ return datetime.now().date()\n+ return datetime.strptime(d, \"%A, %B %d, %Y\").date()\n+\n+\n class Source:\n def __init__(self, postcode: str, uprn: str | int):\n self._postcode: str = postcode\n@@ -79,21 +86,10 @@\n # Parse the response, getting the top box first and then tabled collections after\n results = soup.find(\"div\", {\"class\": \"panel\"}).find_all(\"fieldset\")[0:2]\n heading = results[0].find_all(\"p\")[1:3]\n- bin_text = heading[1].text.strip() + \" bin\"\n- bin_date = datetime.strptime(heading[0].text, \"%A, %B %d, %Y\").date()\n- entries.append(\n- Collection(\n- t=bin_text,\n- date=bin_date,\n- icon=ICON_MAP.get(bin_text.split(\" \")[0].lower()),\n- )\n- )\n \n- results_table = [row for row in results[1].find_all(\"tbody\")[0] if row != \"\\n\"]\n- for row in results_table:\n- text_list = [item.text.strip() for item in row.contents if item != \"\\n\"]\n- bin_text = text_list[1] + \" bin\"\n- bin_date = datetime.strptime(text_list[0], \"%A, %B %d, %Y\").date()\n+ for bin in heading[1].text.strip().split(\", \"):\n+ bin_text = bin + \" bin\"\n+ bin_date = parse_date(heading[0].text)\n entries.append(\n Collection(\n t=bin_text,\n@@ -102,4 +98,18 @@\n )\n )\n \n+ results_table = [row for row in results[1].find_all(\"tbody\")[0] if row != \"\\n\"]\n+ for row in results_table:\n+ text_list = [item.text.strip() for item in row.contents if item != \"\\n\"]\n+ for bin in text_list[1].split(\", \"):\n+ bin_text = bin + \" bin\"\n+ bin_date = parse_date(text_list[0])\n+ entries.append(\n+ Collection(\n+ t=bin_text,\n+ date=bin_date,\n+ icon=ICON_MAP.get(bin_text.split(\" \")[0].lower()),\n+ )\n+ )\n+\n return entries\n", "issue": "[Bug]: Barnsley MBC. 
Fails when running with collection date as today\n### I Have A Problem With:\n\nA specific source\n\n### What's Your Problem\n\nBarnsley Council source does not work when the bin collection date is 'today'\r\nThis attempted to parse the date and then fails.\r\n\r\nIt looks like something with a check if the date is 'today' could resolve this.\r\n```\r\nif heading[0].text == \"Today\": \r\n bin_date = datetime.today().date()\r\nelse:\r\n bin_date = datetime.strptime(heading[0].text, \"%A, %B %d, %Y\").date()\r\n```\n\n### Source (if relevant)\n\nbarnsley_gov_uk \n\n### Logs\n\n```Shell\nfetch failed for source Barnsley Metropolitan Borough Council: Traceback (most recent call last): File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py\", line 134, in fetch entries = self._source.fetch() ^^^^^^^^^^^^^^^^^^^^ File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/source/barnsley_gov_uk.py\", line 83, in fetch bin_date = datetime.strptime(heading[0].text, \"%A, %B %d, %Y\").date() ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File \"/usr/local/lib/python3.12/_strptime.py\", line 554, in _strptime_datetime tt, fraction, gmtoff_fraction = _strptime(data_string, format) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File \"/usr/local/lib/python3.12/_strptime.py\", line 333, in _strptime raise ValueError(\"time data %r does not match format %r\" % ValueError: time data 'Today' does not match format '%A, %B %d, %Y'\n```\n\n\n### Relevant Configuration\n\n```YAML\nwaste_collection_schedule:\r\n sources:\r\n - name: barnsley_gov_uk\r\n args:\r\n postcode: S70 3QU\r\n uprn: '100050607581'\r\n calendar_title: Waste Collection\r\n day_switch_time: '12:00'\n```\n\n\n### Checklist Source Error\n\n- [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)\n- [X] Checked that the website of your service provider is still working\n- [X] Tested my attributes on the service provider website (if possible)\n- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on \"Redownload\" and choose master as version)\n\n### Checklist Sensor Error\n\n- [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)\n\n### Required\n\n- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.\n- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.\n", "before_files": [{"content": "# Credit where it's due:\n# This is predominantly a refactoring of the Bristol City Council script from the UKBinCollectionData repo\n# https://github.com/robbrad/UKBinCollectionData\n\n\nfrom datetime import datetime\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\n\nTITLE = \"Barnsley Metropolitan Borough Council\"\nDESCRIPTION = \"Source for Barnsley Metropolitan Borough Council.\"\nURL = \"https://barnsley.gov.uk\"\nTEST_CASES = {\n \"S71 1EE 100050671689\": {\"postcode\": \"S71 1EE\", \"uprn\": 100050671689},\n \"S75 1QF 10032783992\": {\"postcode\": \"S75 1QF\", \"uprn\": \"10032783992\"},\n}\n\n\nICON_MAP = {\n \"grey\": \"mdi:trash-can\",\n \"green\": \"mdi:leaf\",\n \"blue\": \"mdi:package-variant\",\n \"brown\": 
\"mdi:recycle\",\n}\n\n\nAPI_URL = \"https://waste.barnsley.gov.uk/ViewCollection/SelectAddress\"\n\n\nclass Source:\n def __init__(self, postcode: str, uprn: str | int):\n self._postcode: str = postcode\n self._uprn: str | int = uprn\n\n def fetch(self):\n entries = []\n\n # Pass in form data and make the POST request\n headers = {\n \"authority\": \"waste.barnsley.gov.uk\",\n \"accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7\",\n \"accept-language\": \"en-GB,en;q=0.9\",\n \"cache-control\": \"no-cache\",\n \"content-type\": \"application/x-www-form-urlencoded\",\n \"origin\": \"https://waste.barnsley.gov.uk\",\n \"pragma\": \"no-cache\",\n \"referer\": \"https://waste.barnsley.gov.uk/ViewCollection/SelectAddress\",\n \"sec-ch-ua\": '\"Chromium\";v=\"118\", \"Opera GX\";v=\"104\", \"Not=A?Brand\";v=\"99\"',\n \"sec-ch-ua-mobile\": \"?0\",\n \"sec-ch-ua-platform\": '\"Windows\"',\n \"sec-fetch-dest\": \"document\",\n \"sec-fetch-mode\": \"navigate\",\n \"sec-fetch-site\": \"same-origin\",\n \"sec-fetch-user\": \"?1\",\n \"upgrade-insecure-requests\": \"1\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.5993.118 Safari/537.36\",\n }\n form_data = {\n \"personInfo.person1.HouseNumberOrName\": \"\",\n \"personInfo.person1.Postcode\": f\"{self._postcode}\",\n \"personInfo.person1.UPRN\": f\"{self._uprn}\",\n \"person1_SelectAddress\": \"Select address\",\n }\n response = requests.post(\n \"https://waste.barnsley.gov.uk/ViewCollection/SelectAddress\",\n headers=headers,\n data=form_data,\n )\n soup = BeautifulSoup(response.text, features=\"html.parser\")\n soup.prettify()\n\n if response.status_code != 200:\n raise ConnectionRefusedError(\n \"Error getting results from website! Please open an issue on GitHub!\"\n )\n\n # Parse the response, getting the top box first and then tabled collections after\n results = soup.find(\"div\", {\"class\": \"panel\"}).find_all(\"fieldset\")[0:2]\n heading = results[0].find_all(\"p\")[1:3]\n bin_text = heading[1].text.strip() + \" bin\"\n bin_date = datetime.strptime(heading[0].text, \"%A, %B %d, %Y\").date()\n entries.append(\n Collection(\n t=bin_text,\n date=bin_date,\n icon=ICON_MAP.get(bin_text.split(\" \")[0].lower()),\n )\n )\n\n results_table = [row for row in results[1].find_all(\"tbody\")[0] if row != \"\\n\"]\n for row in results_table:\n text_list = [item.text.strip() for item in row.contents if item != \"\\n\"]\n bin_text = text_list[1] + \" bin\"\n bin_date = datetime.strptime(text_list[0], \"%A, %B %d, %Y\").date()\n entries.append(\n Collection(\n t=bin_text,\n date=bin_date,\n icon=ICON_MAP.get(bin_text.split(\" \")[0].lower()),\n )\n )\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/barnsley_gov_uk.py"}]} | 2,571 | 881 |
gh_patches_debug_12776 | rasdani/github-patches | git_diff | platformsh__platformsh-docs-2105 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
:arrow_up: Update MeiliSearch
### What needs to be documented?
We should update MeiliSearch to version 0.25. This means we have to change how we manage API keys. It does mean we can create more keys for monitoring and the like.
### Additional context
[Blog post on version](https://blog.meilisearch.com/whats-new-in-v0-25/)
[Additional context](https://github.com/orgs/platformsh/projects/3)
</issue>
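For context on the key-management point: Meilisearch 0.25 replaces the fixed public/private keys with a `/keys` API, so besides the master key an instance can mint scoped keys (for example a search-only key for the docs front end and another for monitoring). A hedged sketch with the Python client; the exact method and field names depend on the client version, so treat this as illustrative rather than authoritative:

```python
import meilisearch

client = meilisearch.Client("http://127.0.0.1:7700", "MASTER_KEY")

# v0.25 ships default admin and search keys; list whatever already exists.
print(client.get_keys())

# Mint a search-only key for the docs front end (field names per the /keys API).
search_key = client.create_key({
    "description": "Docs front-end search",
    "actions": ["search"],
    "indexes": ["docs"],
    "expiresAt": None,
})
```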
<code>
[start of search/main.py]
1 import os
2 import glob
3 import json
4 import meilisearch
5 from platformshconfig import Config
6
7 class Search:
8 def __init__(self):
9 self.default = {
10 "host": "http://127.0.0.1",
11 "key": None,
12 "port": 7700
13 }
14
15 self.scrape_dir = "output"
16 self.scrape_config = "config/scrape.json"
17 self.docs_index = "docs"
18 self.primaryKey = "documentId"
19 self.index_name = "Docs"
20
21 # Below are Platform.sh custom settings for how the search engine functions.
22
23 # Data available to the dropdown React app in docs, used to fill out autocomplete results.
24 self.displayed_attributes = ['keywords', 'title', 'text', 'url', 'site', 'section']
25 # Data actually searchable by our queries.
26 self.searchable_attributes = ['keywords', 'title', 'pageUrl', 'section', 'text', 'url']
27
28 # Show results for one query with the listed pages, when they by default would not show up as best results.
29 # Note: these aren't automatically two-way, which is why they're all defined twice.
30 self.synonyms = {
31 "cron": ["crons"],
32 "crons": ["cron tasks", "cron jobs"],
33 "e-mail": ["email"],
34 "routes.yaml": ["routes"],
35 "routes": ["routes.yaml"],
36 "services": ["services.yaml"],
37 "services.yaml": ["services"],
38 "application": [".platform.app.yaml", "app.yaml", "applications.yaml"],
39 ".platform.app.yaml": ["application"],
40 "app.yaml": ["application"],
41 "applications.yaml": ["application", "multi-app"],
42 "multi-app": ["applications.yaml"],
43 "regions": ["public ip addresses"],
44 "public ip addresses": ["regions"],
45 "ssl": ["https", "tls"],
46 "https": ["ssl"],
47 "auth": ["authentication", "access control"], # Only needs to be one way since we don't use "auth" in the docs
48 }
49
50 # Ranking rules:
51 #
52 # - Default order: ["words", "typo", "proximity", "attribute", "sort", "exactness"]
53 #
54 # - words: number of times query is in document (greater number gets priority)
55 # - typo: fewer typos > more typos
56 # - proximity: smaller distance between multiple occurences of query in same document > larger distances
57 # - attribute: sorted according to order of importance of attributes (searchable_attributes). terms in
58 # more important attributes first.
59 # - sort: queries are sorted at query time
60 # - exactness: similarity of matched words in document with query
61
62 self.ranking_rules = ["rank:asc", "attribute", "typo", "words", "proximity", "exactness"]
63
64 self.updated_settings = {
65 "rankingRules": self.ranking_rules,
66 "searchableAttributes": self.searchable_attributes,
67 "displayedAttributes": self.displayed_attributes
68 }
69
70 # Group results by page
71 self.distinct_attribute = "pageUrl"
72
73 def getConnectionString(self):
74 """
75 Sets the Meilisearch host string, depending on the environment.
76
77 Returns:
78 string: Meilisearch host string.
79 """
80 if os.environ.get('PORT'):
81 return "{}:{}".format(self.default["host"], os.environ['PORT'])
82 else:
83 return "{}:{}".format(self.default["host"], self.default["port"])
84
85 def getMasterKey(self):
86 """
87 Retrieves the Meilisearch master key, either from the Platform.sh environment or locally.
88 """
89 config = Config()
90 if config.is_valid_platform():
91 return config.projectEntropy
92 elif os.environ.get("MEILI_MASTER_KEY"):
93 return os.environ["MEILI_MASTER_KEY"]
94 else:
95 return self.default["key"]
96
97 def add_documents(self, index):
98 """
99 Cycle through the individual site indexes in /outputs so their individual documents can be added to Meilisearch.
100 """
101 documents = [f for f in glob.glob("{}/*.json".format(self.scrape_dir))]
102 for doc in documents:
103 self.add(doc, index)
104
105 def add(self, doc, index):
106 """
107 Add an individual site's index to the Meilisearch service.
108 """
109 with open(doc) as scraped_index:
110 data = json.load(scraped_index)
111 index.add_documents(data)
112
113 def update(self):
114 """
115 Updates the Meilisearch index.
116 """
117 # Create a Meilisearch client.
118 client = meilisearch.Client(self.getConnectionString(), self.getMasterKey())
119
120 # Delete previous index
121 if len(client.get_indexes()):
122 client.get_index(self.docs_index).delete()
123
124 # Create a new index
125 index = client.create_index(uid=self.docs_index, options={'primaryKey': self.primaryKey, 'uid': self.index_name})
126
127 # Add synonyms for the index
128 index.update_synonyms(self.synonyms)
129
130 # Update its settings: what can be searched, what's displayable, and how results should be ranked.
131 index.update_settings(self.updated_settings)
132
133 # Update distinct attribute.
134 index.update_distinct_attribute(self.distinct_attribute)
135
136 # Add documents to the index
137 self.add_documents(index)
138
139 if __name__ == "__main__":
140 meili = Search()
141 meili.update()
142
[end of search/main.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/search/main.py b/search/main.py
--- a/search/main.py
+++ b/search/main.py
@@ -119,10 +119,14 @@
# Delete previous index
if len(client.get_indexes()):
- client.get_index(self.docs_index).delete()
+ client.index(self.docs_index).delete()
# Create a new index
- index = client.create_index(uid=self.docs_index, options={'primaryKey': self.primaryKey, 'uid': self.index_name})
+ create_index_task = client.create_index(uid=self.docs_index, options={'primaryKey': self.primaryKey, 'uid': self.index_name})
+
+ client.wait_for_task(create_index_task['uid'])
+
+ index = client.get_index(create_index_task['indexUid'])
# Add synonyms for the index
index.update_synonyms(self.synonyms)
| {"golden_diff": "diff --git a/search/main.py b/search/main.py\n--- a/search/main.py\n+++ b/search/main.py\n@@ -119,10 +119,14 @@\n \n # Delete previous index\n if len(client.get_indexes()):\n- client.get_index(self.docs_index).delete()\n+ client.index(self.docs_index).delete()\n \n # Create a new index\n- index = client.create_index(uid=self.docs_index, options={'primaryKey': self.primaryKey, 'uid': self.index_name})\n+ create_index_task = client.create_index(uid=self.docs_index, options={'primaryKey': self.primaryKey, 'uid': self.index_name})\n+\n+ client.wait_for_task(create_index_task['uid'])\n+\n+ index = client.get_index(create_index_task['indexUid'])\n \n # Add synonyms for the index\n index.update_synonyms(self.synonyms)\n", "issue": ":arrow_up: Update MeiliSearch\n### What needs to be documented?\r\n\r\nWe should update MeiliSearch to version 0.25. This means we have to change how we manage API keys. It does mean we can create more keys for monitoring and the like.\r\n\r\n### Additional context\r\n\r\n[Blog post on version](https://blog.meilisearch.com/whats-new-in-v0-25/)\r\n[Additional context](https://github.com/orgs/platformsh/projects/3)\n", "before_files": [{"content": "import os\nimport glob\nimport json\nimport meilisearch\nfrom platformshconfig import Config\n\nclass Search:\n def __init__(self):\n self.default = {\n \"host\": \"http://127.0.0.1\",\n \"key\": None,\n \"port\": 7700\n }\n\n self.scrape_dir = \"output\"\n self.scrape_config = \"config/scrape.json\"\n self.docs_index = \"docs\"\n self.primaryKey = \"documentId\"\n self.index_name = \"Docs\"\n\n # Below are Platform.sh custom settings for how the search engine functions.\n\n # Data available to the dropdown React app in docs, used to fill out autocomplete results.\n self.displayed_attributes = ['keywords', 'title', 'text', 'url', 'site', 'section']\n # Data actually searchable by our queries.\n self.searchable_attributes = ['keywords', 'title', 'pageUrl', 'section', 'text', 'url']\n\n # Show results for one query with the listed pages, when they by default would not show up as best results.\n # Note: these aren't automatically two-way, which is why they're all defined twice.\n self.synonyms = {\n \"cron\": [\"crons\"],\n \"crons\": [\"cron tasks\", \"cron jobs\"],\n \"e-mail\": [\"email\"],\n \"routes.yaml\": [\"routes\"],\n \"routes\": [\"routes.yaml\"],\n \"services\": [\"services.yaml\"],\n \"services.yaml\": [\"services\"],\n \"application\": [\".platform.app.yaml\", \"app.yaml\", \"applications.yaml\"],\n \".platform.app.yaml\": [\"application\"],\n \"app.yaml\": [\"application\"],\n \"applications.yaml\": [\"application\", \"multi-app\"],\n \"multi-app\": [\"applications.yaml\"],\n \"regions\": [\"public ip addresses\"],\n \"public ip addresses\": [\"regions\"],\n \"ssl\": [\"https\", \"tls\"],\n \"https\": [\"ssl\"],\n \"auth\": [\"authentication\", \"access control\"], # Only needs to be one way since we don't use \"auth\" in the docs\n }\n\n # Ranking rules:\n #\n # - Default order: [\"words\", \"typo\", \"proximity\", \"attribute\", \"sort\", \"exactness\"]\n #\n # - words: number of times query is in document (greater number gets priority)\n # - typo: fewer typos > more typos\n # - proximity: smaller distance between multiple occurences of query in same document > larger distances\n # - attribute: sorted according to order of importance of attributes (searchable_attributes). 
terms in\n # more important attributes first.\n # - sort: queries are sorted at query time\n # - exactness: similarity of matched words in document with query\n\n self.ranking_rules = [\"rank:asc\", \"attribute\", \"typo\", \"words\", \"proximity\", \"exactness\"]\n\n self.updated_settings = {\n \"rankingRules\": self.ranking_rules,\n \"searchableAttributes\": self.searchable_attributes,\n \"displayedAttributes\": self.displayed_attributes\n }\n\n # Group results by page\n self.distinct_attribute = \"pageUrl\"\n\n def getConnectionString(self):\n \"\"\"\n Sets the Meilisearch host string, depending on the environment.\n\n Returns:\n string: Meilisearch host string.\n \"\"\"\n if os.environ.get('PORT'):\n return \"{}:{}\".format(self.default[\"host\"], os.environ['PORT'])\n else:\n return \"{}:{}\".format(self.default[\"host\"], self.default[\"port\"])\n\n def getMasterKey(self):\n \"\"\"\n Retrieves the Meilisearch master key, either from the Platform.sh environment or locally.\n \"\"\"\n config = Config()\n if config.is_valid_platform():\n return config.projectEntropy\n elif os.environ.get(\"MEILI_MASTER_KEY\"):\n return os.environ[\"MEILI_MASTER_KEY\"]\n else:\n return self.default[\"key\"]\n\n def add_documents(self, index):\n \"\"\"\n Cycle through the individual site indexes in /outputs so their individual documents can be added to Meilisearch.\n \"\"\"\n documents = [f for f in glob.glob(\"{}/*.json\".format(self.scrape_dir))]\n for doc in documents:\n self.add(doc, index)\n\n def add(self, doc, index):\n \"\"\"\n Add an individual site's index to the Meilisearch service.\n \"\"\"\n with open(doc) as scraped_index:\n data = json.load(scraped_index)\n index.add_documents(data)\n\n def update(self):\n \"\"\"\n Updates the Meilisearch index.\n \"\"\"\n # Create a Meilisearch client.\n client = meilisearch.Client(self.getConnectionString(), self.getMasterKey())\n\n # Delete previous index\n if len(client.get_indexes()):\n client.get_index(self.docs_index).delete()\n\n # Create a new index\n index = client.create_index(uid=self.docs_index, options={'primaryKey': self.primaryKey, 'uid': self.index_name})\n\n # Add synonyms for the index\n index.update_synonyms(self.synonyms)\n\n # Update its settings: what can be searched, what's displayable, and how results should be ranked.\n index.update_settings(self.updated_settings)\n\n # Update distinct attribute.\n index.update_distinct_attribute(self.distinct_attribute)\n\n # Add documents to the index\n self.add_documents(index)\n\nif __name__ == \"__main__\":\n meili = Search()\n meili.update()\n", "path": "search/main.py"}]} | 2,135 | 185 |
gh_patches_debug_5561 | rasdani/github-patches | git_diff | Showndarya__Hacktoberfest-545 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make JSON linting more verbose?
Currently we simply check:
1. json is valid
2. json contains the keys we want
3. those keys are not empty
The problem is with step 1: it's not very helpful if people have no idea what the JSON spec is.
One fix is:
1. add a link to jsonlint.com or such a service in the print of the error so that people can check themselves.
2. add something like http://deron.meranda.us/python/demjson/ with proper package caching on the Travis side, but it will probably still make for slower builds
</issue>
<code>
[start of .travis.py]
1 import json
2 import os
3 import re
4 import subprocess
5
6 # Get a diff between master and current.
7 try:
8 commit_range = os.environ["TRAVIS_COMMIT_RANGE"]
9 changed_files = subprocess.check_output(["git", "diff", "--name-only", commit_range])
10 except KeyError:
11 print("🔥 This should be run on Travis. Otherwise make sure TRAVIS_BRANCH is set.")
12 exit(1)
13
14 # Filter JSON files only.
15 changed_files_json = []
16 if changed_files:
17 changed_files = changed_files.decode()
18 for changed_file in changed_files.split('\n'):
19 if re.search(r"\.json$", changed_file):
20 changed_files_json.append(changed_file)
21
22
23 # Iterate over list of changed JSON files.
24 for changed_file_json in changed_files_json:
25 print(f"Checking file {changed_file_json}...")
26 there_was_an_error = False
27
28 if not os.path.basename(changed_file_json)[0].isupper():
29 there_was_an_error = True
30 print("🔥 File name not capitalized.")
31
32 try:
33 with open(changed_file_json) as data_file:
34 file_content = json.loads(data_file.read())
35 except json.decoder.JSONDecodeError:
36 there_was_an_error = True
37 print("🔥 JSON could not be parsed.")
38
39 if 'word' not in file_content:
40 there_was_an_error = True
41 print("🔥 Key 'word' not found.")
42
43 if not file_content["word"]:
44 there_was_an_error = True
45 print("🔥 Value for 'word' appears to be empty.")
46
47 if 'definitions' not in file_content:
48 there_was_an_error = True
49 print("🔥 Key 'definitions' not found.")
50
51 if not file_content["definitions"]:
52 there_was_an_error = True
53 print("🔥 Value for 'definitions' appears to be empty.")
54
55 if 'parts-of-speech' not in file_content:
56 there_was_an_error = True
57 print("🔥 Key 'parts-of-speech' not found.")
58
59 if not file_content["parts-of-speech"]:
60 there_was_an_error = True
61 print("🔥 Value for 'parts-of-speech' appears to be empty.")
62
63 if there_was_an_error:
64 exit(1)
65
[end of .travis.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/.travis.py b/.travis.py
--- a/.travis.py
+++ b/.travis.py
@@ -34,7 +34,7 @@
file_content = json.loads(data_file.read())
except json.decoder.JSONDecodeError:
there_was_an_error = True
- print("🔥 JSON could not be parsed.")
+ print(f"🔥 JSON could not be parsed. Follow this link to know more : https://jsonlint.com/?json={data_file.read()}")
if 'word' not in file_content:
there_was_an_error = True
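The patch above answers the issue by appending a jsonlint.com link to the parse-error message in `.travis.py`. A sketch of the same idea as a small helper; the function name and return shape are invented, and the file content is read once up front, since in the patched code the second `data_file.read()` happens after the first read has already consumed the file, so the link would carry an empty payload:

```python
# Sketch of a more verbose JSON check: parse once, and on failure point the
# contributor at an online validator, as the golden diff above does.
import json
from urllib.parse import quote


def parse_json_or_hint(path):
    with open(path) as data_file:
        raw = data_file.read()
    try:
        return json.loads(raw), None
    except json.decoder.JSONDecodeError as error:
        hint = (
            f"🔥 JSON could not be parsed ({error}). "
            f"Check it at https://jsonlint.com/?json={quote(raw)}"
        )
        return None, hint
```

Including the decode error itself also surfaces the line and column of the failure, which addresses the issue's complaint that a bare "could not be parsed" message is unhelpful.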
| {"golden_diff": "diff --git a/.travis.py b/.travis.py\n--- a/.travis.py\n+++ b/.travis.py\n@@ -34,7 +34,7 @@\n file_content = json.loads(data_file.read())\n except json.decoder.JSONDecodeError:\n there_was_an_error = True\n- print(\"\ud83d\udd25 JSON could not be parsed.\")\n+ print(f\"\ud83d\udd25 JSON could not be parsed. Follow this link to know more : https://jsonlint.com/?json={data_file.read()}\")\n \n if 'word' not in file_content:\n there_was_an_error = True\n", "issue": "Make JSON linting more verbose ?\nCurrently we simply check :\r\n1. json is valid\r\n2. json contains the keys we want\r\n3. those keys are not empty\r\n\r\nthe problem is with step 1, it's not very helpful if people have no idea what the JSON spec is.\r\n\r\nOne fix is :\r\n1. add a link to jsonlint.com or such a service in the print of the error so that people can check themselves.\r\n2. add something like http://deron.meranda.us/python/demjson/ with proper package caching on travis side, but it will make for slower builds still probably\n", "before_files": [{"content": "import json\nimport os\nimport re\nimport subprocess\n\n# Get a diff between master and current.\ntry:\n commit_range = os.environ[\"TRAVIS_COMMIT_RANGE\"]\n changed_files = subprocess.check_output([\"git\", \"diff\", \"--name-only\", commit_range])\nexcept KeyError:\n print(\"\ud83d\udd25 This should be run on Travis. Otherwise make sure TRAVIS_BRANCH is set.\")\n exit(1)\n\n# Filter JSON files only.\nchanged_files_json = []\nif changed_files:\n changed_files = changed_files.decode()\n for changed_file in changed_files.split('\\n'):\n if re.search(r\"\\.json$\", changed_file):\n changed_files_json.append(changed_file)\n\n \n# Iterate over list of changed JSON files.\nfor changed_file_json in changed_files_json:\n print(f\"Checking file {changed_file_json}...\")\n there_was_an_error = False\n\n if not os.path.basename(changed_file_json)[0].isupper():\n there_was_an_error = True\n print(\"\ud83d\udd25 File name not capitalized.\")\n\n try:\n with open(changed_file_json) as data_file:\n file_content = json.loads(data_file.read())\n except json.decoder.JSONDecodeError:\n there_was_an_error = True\n print(\"\ud83d\udd25 JSON could not be parsed.\")\n\n if 'word' not in file_content:\n there_was_an_error = True\n print(\"\ud83d\udd25 Key 'word' not found.\")\n\n if not file_content[\"word\"]:\n there_was_an_error = True\n print(\"\ud83d\udd25 Value for 'word' appears to be empty.\")\n\n if 'definitions' not in file_content:\n there_was_an_error = True\n print(\"\ud83d\udd25 Key 'definitions' not found.\")\n\n if not file_content[\"definitions\"]:\n there_was_an_error = True\n print(\"\ud83d\udd25 Value for 'definitions' appears to be empty.\")\n\n if 'parts-of-speech' not in file_content:\n there_was_an_error = True\n print(\"\ud83d\udd25 Key 'parts-of-speech' not found.\")\n\n if not file_content[\"parts-of-speech\"]:\n there_was_an_error = True\n print(\"\ud83d\udd25 Value for 'parts-of-speech' appears to be empty.\")\n\n if there_was_an_error:\n exit(1)\n", "path": ".travis.py"}]} | 1,254 | 128 |
gh_patches_debug_38657 | rasdani/github-patches | git_diff | litestar-org__litestar-1780 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
StaticFilesConfig and virtual directories
I'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem.
This is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.
https://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32
</issue>
<code>
[start of litestar/contrib/sqlalchemy/types.py]
1 from __future__ import annotations
2
3 import uuid
4 from base64 import b64decode
5 from typing import TYPE_CHECKING, Any, cast
6
7 from sqlalchemy.dialects.oracle import BLOB as ORA_BLOB
8 from sqlalchemy.dialects.oracle import RAW as ORA_RAW
9 from sqlalchemy.dialects.postgresql import JSONB as PG_JSONB
10 from sqlalchemy.dialects.postgresql import UUID as PG_UUID
11 from sqlalchemy.types import BINARY, CHAR, BigInteger, Integer, TypeDecorator
12 from sqlalchemy.types import JSON as _JSON
13
14 if TYPE_CHECKING:
15 from sqlalchemy.engine import Dialect
16
17 BigIntIdentity = BigInteger().with_variant(Integer, "sqlite")
18
19
20 class GUID(TypeDecorator):
21 """Platform-independent GUID type.
22
23 Uses PostgreSQL's UUID type, Oracle's RAW(16) type, otherwise uses
24 BINARY(16) or CHAR(32), storing as stringified hex values.
25
26 Will accept stringified UUIDs as a hexstring or an actual UUID
27
28 """
29
30 impl = BINARY(16)
31 cache_ok = True
32
33 @property
34 def python_type(self) -> type[uuid.UUID]:
35 return uuid.UUID
36
37 def __init__(self, *args: Any, binary: bool = True, **kwargs: Any) -> None:
38 self.binary = binary
39
40 def load_dialect_impl(self, dialect: Dialect) -> Any:
41 if dialect.name in {"postgresql", "duckdb"}:
42 return dialect.type_descriptor(PG_UUID())
43 if dialect.name == "oracle":
44 return dialect.type_descriptor(ORA_RAW(16))
45 if self.binary:
46 return dialect.type_descriptor(BINARY(16))
47 return dialect.type_descriptor(CHAR(32))
48
49 def process_bind_param(self, value: bytes | str | uuid.UUID | None, dialect: Dialect) -> bytes | str | None:
50 if value is None:
51 return value
52 if dialect.name in {"postgresql", "duckdb"}:
53 return str(value)
54 value = self.to_uuid(value)
55 if value is None:
56 return value
57 if dialect.name in {"oracle", "spanner+spanner"}:
58 return value.bytes
59 return value.bytes if self.binary else value.hex
60
61 def process_result_value(self, value: bytes | str | uuid.UUID | None, dialect: Dialect) -> uuid.UUID | None:
62 if value is None:
63 return value
64 if isinstance(value, uuid.UUID):
65 return value
66 if dialect.name == "spanner+spanner":
67 return uuid.UUID(bytes=b64decode(value))
68 if self.binary:
69 return uuid.UUID(bytes=cast("bytes", value))
70 return uuid.UUID(hex=cast("str", value))
71
72 @staticmethod
73 def to_uuid(value: Any) -> uuid.UUID | None:
74 if isinstance(value, uuid.UUID) or value is None:
75 return value
76 try:
77 value = uuid.UUID(hex=value)
78 except (TypeError, ValueError):
79 value = uuid.UUID(bytes=value)
80 return cast("uuid.UUID | None", value)
81
82
83 class JSON(TypeDecorator):
84 """Platform-independent JSON type.
85
86 Uses JSONB type for postgres, BLOB for Oracle, otherwise uses the generic JSON data type.
87
88 JSON = _JSON().with_variant(PG_JSONB, "postgresql").with_variant(ORA_BLOB, "oracle")
89
90 """
91
92 impl = _JSON
93 cache_ok = True
94
95 @property
96 def python_type(self) -> type[dict]:
97 return dict
98
99 def __init__(self, *args: Any, **kwargs: Any) -> None:
100 """Initialize JSON type"""
101
102 def load_dialect_impl(self, dialect: Dialect) -> Any:
103 if dialect.name == "postgresql":
104 return dialect.type_descriptor(PG_JSONB()) # type: ignore
105 if dialect.name == "oracle":
106 return dialect.type_descriptor(ORA_BLOB())
107 return dialect.type_descriptor(_JSON())
108
[end of litestar/contrib/sqlalchemy/types.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/litestar/contrib/sqlalchemy/types.py b/litestar/contrib/sqlalchemy/types.py
--- a/litestar/contrib/sqlalchemy/types.py
+++ b/litestar/contrib/sqlalchemy/types.py
@@ -4,11 +4,12 @@
from base64 import b64decode
from typing import TYPE_CHECKING, Any, cast
+from sqlalchemy import text, util
from sqlalchemy.dialects.oracle import BLOB as ORA_BLOB
from sqlalchemy.dialects.oracle import RAW as ORA_RAW
from sqlalchemy.dialects.postgresql import JSONB as PG_JSONB
from sqlalchemy.dialects.postgresql import UUID as PG_UUID
-from sqlalchemy.types import BINARY, CHAR, BigInteger, Integer, TypeDecorator
+from sqlalchemy.types import BINARY, CHAR, BigInteger, Integer, SchemaType, TypeDecorator
from sqlalchemy.types import JSON as _JSON
if TYPE_CHECKING:
@@ -80,7 +81,7 @@
return cast("uuid.UUID | None", value)
-class JSON(TypeDecorator):
+class JSON(TypeDecorator, SchemaType): # type: ignore
"""Platform-independent JSON type.
Uses JSONB type for postgres, BLOB for Oracle, otherwise uses the generic JSON data type.
@@ -98,6 +99,8 @@
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""Initialize JSON type"""
+ self.name = kwargs.pop("name", None)
+ self.oracle_strict = kwargs.pop("oracle_strict", True)
def load_dialect_impl(self, dialect: Dialect) -> Any:
if dialect.name == "postgresql":
@@ -105,3 +108,31 @@
if dialect.name == "oracle":
return dialect.type_descriptor(ORA_BLOB())
return dialect.type_descriptor(_JSON())
+
+ def _should_create_constraint(self, compiler: Any, **kw: Any) -> bool:
+ return bool(compiler.dialect.name == "oracle")
+
+ def _variant_mapping_for_set_table(self, column: Any) -> dict | None:
+ if column.type._variant_mapping:
+ variant_mapping = dict(column.type._variant_mapping)
+ variant_mapping["_default"] = column.type
+ else:
+ variant_mapping = None
+ return variant_mapping
+
+ @util.preload_module("sqlalchemy.sql.schema")
+ def _set_table(self, column: Any, table: Any) -> None:
+ schema = util.preloaded.sql_schema
+ variant_mapping = self._variant_mapping_for_set_table(column)
+ constraint_options = "(strict)" if self.oracle_strict else ""
+ sqltext = text(f"{column.name} is json {constraint_options}")
+ e = schema.CheckConstraint(
+ sqltext,
+ name=f"{column.name}_is_json",
+ _create_rule=util.portable_instancemethod( # type: ignore[no-untyped-call]
+ self._should_create_constraint,
+ {"variant_mapping": variant_mapping},
+ ),
+ _type_bound=True,
+ )
+ table.append_constraint(e)
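The patch above turns the `JSON` decorator in `litestar/contrib/sqlalchemy/types.py` into a `SchemaType` so that Oracle columns gain an `is json` check constraint. A usage sketch under that assumption; the table and column names are invented:

```python
# Hypothetical table definition using the patched types; on Oracle, creating the table
# should also emit a "payload is json (strict)" check constraint, per the patch above.
from sqlalchemy import Column, MetaData, Table

from litestar.contrib.sqlalchemy.types import GUID, JSON

metadata = MetaData()

documents = Table(
    "documents",
    metadata,
    Column("id", GUID(), primary_key=True),
    Column("payload", JSON(oracle_strict=True)),
)
```

`oracle_strict=True` matches the default the patch pops from the keyword arguments; passing `oracle_strict=False` would drop the `(strict)` modifier from the generated constraint.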
| {"golden_diff": "diff --git a/litestar/contrib/sqlalchemy/types.py b/litestar/contrib/sqlalchemy/types.py\n--- a/litestar/contrib/sqlalchemy/types.py\n+++ b/litestar/contrib/sqlalchemy/types.py\n@@ -4,11 +4,12 @@\n from base64 import b64decode\n from typing import TYPE_CHECKING, Any, cast\n \n+from sqlalchemy import text, util\n from sqlalchemy.dialects.oracle import BLOB as ORA_BLOB\n from sqlalchemy.dialects.oracle import RAW as ORA_RAW\n from sqlalchemy.dialects.postgresql import JSONB as PG_JSONB\n from sqlalchemy.dialects.postgresql import UUID as PG_UUID\n-from sqlalchemy.types import BINARY, CHAR, BigInteger, Integer, TypeDecorator\n+from sqlalchemy.types import BINARY, CHAR, BigInteger, Integer, SchemaType, TypeDecorator\n from sqlalchemy.types import JSON as _JSON\n \n if TYPE_CHECKING:\n@@ -80,7 +81,7 @@\n return cast(\"uuid.UUID | None\", value)\n \n \n-class JSON(TypeDecorator):\n+class JSON(TypeDecorator, SchemaType): # type: ignore\n \"\"\"Platform-independent JSON type.\n \n Uses JSONB type for postgres, BLOB for Oracle, otherwise uses the generic JSON data type.\n@@ -98,6 +99,8 @@\n \n def __init__(self, *args: Any, **kwargs: Any) -> None:\n \"\"\"Initialize JSON type\"\"\"\n+ self.name = kwargs.pop(\"name\", None)\n+ self.oracle_strict = kwargs.pop(\"oracle_strict\", True)\n \n def load_dialect_impl(self, dialect: Dialect) -> Any:\n if dialect.name == \"postgresql\":\n@@ -105,3 +108,31 @@\n if dialect.name == \"oracle\":\n return dialect.type_descriptor(ORA_BLOB())\n return dialect.type_descriptor(_JSON())\n+\n+ def _should_create_constraint(self, compiler: Any, **kw: Any) -> bool:\n+ return bool(compiler.dialect.name == \"oracle\")\n+\n+ def _variant_mapping_for_set_table(self, column: Any) -> dict | None:\n+ if column.type._variant_mapping:\n+ variant_mapping = dict(column.type._variant_mapping)\n+ variant_mapping[\"_default\"] = column.type\n+ else:\n+ variant_mapping = None\n+ return variant_mapping\n+\n+ @util.preload_module(\"sqlalchemy.sql.schema\")\n+ def _set_table(self, column: Any, table: Any) -> None:\n+ schema = util.preloaded.sql_schema\n+ variant_mapping = self._variant_mapping_for_set_table(column)\n+ constraint_options = \"(strict)\" if self.oracle_strict else \"\"\n+ sqltext = text(f\"{column.name} is json {constraint_options}\")\n+ e = schema.CheckConstraint(\n+ sqltext,\n+ name=f\"{column.name}_is_json\",\n+ _create_rule=util.portable_instancemethod( # type: ignore[no-untyped-call]\n+ self._should_create_constraint,\n+ {\"variant_mapping\": variant_mapping},\n+ ),\n+ _type_bound=True,\n+ )\n+ table.append_constraint(e)\n", "issue": "StaticFilesConfig and virtual directories\nI'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. \r\n\r\nThis is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). 
I think this condition should be relaxed to support virtual filesystems.\r\n\r\nhttps://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32\n", "before_files": [{"content": "from __future__ import annotations\n\nimport uuid\nfrom base64 import b64decode\nfrom typing import TYPE_CHECKING, Any, cast\n\nfrom sqlalchemy.dialects.oracle import BLOB as ORA_BLOB\nfrom sqlalchemy.dialects.oracle import RAW as ORA_RAW\nfrom sqlalchemy.dialects.postgresql import JSONB as PG_JSONB\nfrom sqlalchemy.dialects.postgresql import UUID as PG_UUID\nfrom sqlalchemy.types import BINARY, CHAR, BigInteger, Integer, TypeDecorator\nfrom sqlalchemy.types import JSON as _JSON\n\nif TYPE_CHECKING:\n from sqlalchemy.engine import Dialect\n\nBigIntIdentity = BigInteger().with_variant(Integer, \"sqlite\")\n\n\nclass GUID(TypeDecorator):\n \"\"\"Platform-independent GUID type.\n\n Uses PostgreSQL's UUID type, Oracle's RAW(16) type, otherwise uses\n BINARY(16) or CHAR(32), storing as stringified hex values.\n\n Will accept stringified UUIDs as a hexstring or an actual UUID\n\n \"\"\"\n\n impl = BINARY(16)\n cache_ok = True\n\n @property\n def python_type(self) -> type[uuid.UUID]:\n return uuid.UUID\n\n def __init__(self, *args: Any, binary: bool = True, **kwargs: Any) -> None:\n self.binary = binary\n\n def load_dialect_impl(self, dialect: Dialect) -> Any:\n if dialect.name in {\"postgresql\", \"duckdb\"}:\n return dialect.type_descriptor(PG_UUID())\n if dialect.name == \"oracle\":\n return dialect.type_descriptor(ORA_RAW(16))\n if self.binary:\n return dialect.type_descriptor(BINARY(16))\n return dialect.type_descriptor(CHAR(32))\n\n def process_bind_param(self, value: bytes | str | uuid.UUID | None, dialect: Dialect) -> bytes | str | None:\n if value is None:\n return value\n if dialect.name in {\"postgresql\", \"duckdb\"}:\n return str(value)\n value = self.to_uuid(value)\n if value is None:\n return value\n if dialect.name in {\"oracle\", \"spanner+spanner\"}:\n return value.bytes\n return value.bytes if self.binary else value.hex\n\n def process_result_value(self, value: bytes | str | uuid.UUID | None, dialect: Dialect) -> uuid.UUID | None:\n if value is None:\n return value\n if isinstance(value, uuid.UUID):\n return value\n if dialect.name == \"spanner+spanner\":\n return uuid.UUID(bytes=b64decode(value))\n if self.binary:\n return uuid.UUID(bytes=cast(\"bytes\", value))\n return uuid.UUID(hex=cast(\"str\", value))\n\n @staticmethod\n def to_uuid(value: Any) -> uuid.UUID | None:\n if isinstance(value, uuid.UUID) or value is None:\n return value\n try:\n value = uuid.UUID(hex=value)\n except (TypeError, ValueError):\n value = uuid.UUID(bytes=value)\n return cast(\"uuid.UUID | None\", value)\n\n\nclass JSON(TypeDecorator):\n \"\"\"Platform-independent JSON type.\n\n Uses JSONB type for postgres, BLOB for Oracle, otherwise uses the generic JSON data type.\n\n JSON = _JSON().with_variant(PG_JSONB, \"postgresql\").with_variant(ORA_BLOB, \"oracle\")\n\n \"\"\"\n\n impl = _JSON\n cache_ok = True\n\n @property\n def python_type(self) -> type[dict]:\n return dict\n\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n \"\"\"Initialize JSON type\"\"\"\n\n def load_dialect_impl(self, dialect: Dialect) -> Any:\n if dialect.name == \"postgresql\":\n return dialect.type_descriptor(PG_JSONB()) # type: ignore\n if dialect.name == \"oracle\":\n return dialect.type_descriptor(ORA_BLOB())\n return dialect.type_descriptor(_JSON())\n", "path": 
"litestar/contrib/sqlalchemy/types.py"}]} | 1,775 | 681 |
gh_patches_debug_19016 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-1401 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[PS-1] RPC service `pull_embedding_vector` implementation
</issue>
<code>
[start of elasticdl/python/ps/servicer.py]
1 import threading
2
3 from google.protobuf import empty_pb2
4
5 from elasticdl.proto import elasticdl_pb2, elasticdl_pb2_grpc
6 from elasticdl.python.common.dtypes import dtype_numpy_to_tensor
7
8
9 class PserverServicer(elasticdl_pb2_grpc.PserverServicer):
10 """PS service implementation"""
11
12 def __init__(
13 self,
14 parameters,
15 grads_to_wait,
16 optimizer,
17 lr_staleness_modulation=False,
18 use_async=False,
19 ):
20 self._parameters = parameters
21 self._grads_to_wait = grads_to_wait
22 self._optimizer = optimizer
23 self._lr_staleness_modulation = lr_staleness_modulation
24 self._use_async = use_async
25 self._version = 0
26 self._lock = threading.Lock()
27
28 def pull_variable(self, request, _):
29 """
30 Response with all non-embedding parameters if initialized.
31 """
32 res = elasticdl_pb2.PullVariableResponse()
33 if not self._parameters.init_status:
34 res.model_init_status = False
35 return res
36
37 # Only sync-SGD needs lock
38 # TODO: use a read-write lock to support multiple concurrent reads
39 if not self._use_async:
40 self._lock.acquire()
41 res.model.version = self._parameters.version
42 for name, var in self._parameters.non_embedding_params.items():
43 tensor = res.model.param.add()
44 tensor.name = name
45 tensor.dim.extend(var.shape.as_list())
46 var_values = var.numpy()
47 tensor.content = var_values.tobytes()
48 tensor.dtype = dtype_numpy_to_tensor(var_values.dtype)
49 if not self._use_async:
50 self._lock.release()
51 res.model_init_status = True
52 return res
53
54 def pull_embedding_vector(self, request, _):
55 # TODO: implement this RPC service
56 return elasticdl_pb2.Tensor()
57
58 def push_model(self, request, _):
59 with self._lock:
60 self._parameters.init_from_model_pb(request)
61 return empty_pb2.Empty()
62
63 def push_gradient(self, request, _):
64 # TODO: implement this RPC service
65 return elasticdl_pb2.PushGradientResponse()
66
[end of elasticdl/python/ps/servicer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/elasticdl/python/ps/servicer.py b/elasticdl/python/ps/servicer.py
--- a/elasticdl/python/ps/servicer.py
+++ b/elasticdl/python/ps/servicer.py
@@ -4,6 +4,7 @@
from elasticdl.proto import elasticdl_pb2, elasticdl_pb2_grpc
from elasticdl.python.common.dtypes import dtype_numpy_to_tensor
+from elasticdl.python.common.tensor import Tensor, serialize_tensor
class PserverServicer(elasticdl_pb2_grpc.PserverServicer):
@@ -52,8 +53,15 @@
return res
def pull_embedding_vector(self, request, _):
- # TODO: implement this RPC service
- return elasticdl_pb2.Tensor()
+ ret = elasticdl_pb2.Tensor()
+ if not request.ids:
+ return ret
+ embedding_vectors = self._parameters.get_embedding_param(
+ request.name, request.ids
+ )
+ tensor = Tensor(values=embedding_vectors)
+ serialize_tensor(tensor, ret)
+ return ret
def push_model(self, request, _):
with self._lock:
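The patch above fills in `pull_embedding_vector` using the parameters object's `get_embedding_param(name, ids)` lookup and the tensor serialization helpers. A test-style sketch of driving the handler directly; the request object is a stand-in, since the actual protobuf request type is not shown in this record:

```python
# Sketch only: exercise the new RPC handler without a gRPC channel. `parameters` must
# expose get_embedding_param(name, ids), as assumed by the golden diff above.
from types import SimpleNamespace

from elasticdl.python.ps.servicer import PserverServicer


def pull_layer_embeddings(parameters, layer_name, ids):
    servicer = PserverServicer(parameters, grads_to_wait=1, optimizer=None, use_async=True)
    request = SimpleNamespace(name=layer_name, ids=ids)  # stand-in for the real request message
    return servicer.pull_embedding_vector(request, None)  # returns an elasticdl_pb2.Tensor
```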
| {"golden_diff": "diff --git a/elasticdl/python/ps/servicer.py b/elasticdl/python/ps/servicer.py\n--- a/elasticdl/python/ps/servicer.py\n+++ b/elasticdl/python/ps/servicer.py\n@@ -4,6 +4,7 @@\n \n from elasticdl.proto import elasticdl_pb2, elasticdl_pb2_grpc\n from elasticdl.python.common.dtypes import dtype_numpy_to_tensor\n+from elasticdl.python.common.tensor import Tensor, serialize_tensor\n \n \n class PserverServicer(elasticdl_pb2_grpc.PserverServicer):\n@@ -52,8 +53,15 @@\n return res\n \n def pull_embedding_vector(self, request, _):\n- # TODO: implement this RPC service\n- return elasticdl_pb2.Tensor()\n+ ret = elasticdl_pb2.Tensor()\n+ if not request.ids:\n+ return ret\n+ embedding_vectors = self._parameters.get_embedding_param(\n+ request.name, request.ids\n+ )\n+ tensor = Tensor(values=embedding_vectors)\n+ serialize_tensor(tensor, ret)\n+ return ret\n \n def push_model(self, request, _):\n with self._lock:\n", "issue": "[PS-1] RPC service `pull_embedding_vector` implementation\n\n", "before_files": [{"content": "import threading\n\nfrom google.protobuf import empty_pb2\n\nfrom elasticdl.proto import elasticdl_pb2, elasticdl_pb2_grpc\nfrom elasticdl.python.common.dtypes import dtype_numpy_to_tensor\n\n\nclass PserverServicer(elasticdl_pb2_grpc.PserverServicer):\n \"\"\"PS service implementation\"\"\"\n\n def __init__(\n self,\n parameters,\n grads_to_wait,\n optimizer,\n lr_staleness_modulation=False,\n use_async=False,\n ):\n self._parameters = parameters\n self._grads_to_wait = grads_to_wait\n self._optimizer = optimizer\n self._lr_staleness_modulation = lr_staleness_modulation\n self._use_async = use_async\n self._version = 0\n self._lock = threading.Lock()\n\n def pull_variable(self, request, _):\n \"\"\"\n Response with all non-embedding parameters if initialized.\n \"\"\"\n res = elasticdl_pb2.PullVariableResponse()\n if not self._parameters.init_status:\n res.model_init_status = False\n return res\n\n # Only sync-SGD needs lock\n # TODO: use a read-write lock to support multiple concurrent reads\n if not self._use_async:\n self._lock.acquire()\n res.model.version = self._parameters.version\n for name, var in self._parameters.non_embedding_params.items():\n tensor = res.model.param.add()\n tensor.name = name\n tensor.dim.extend(var.shape.as_list())\n var_values = var.numpy()\n tensor.content = var_values.tobytes()\n tensor.dtype = dtype_numpy_to_tensor(var_values.dtype)\n if not self._use_async:\n self._lock.release()\n res.model_init_status = True\n return res\n\n def pull_embedding_vector(self, request, _):\n # TODO: implement this RPC service\n return elasticdl_pb2.Tensor()\n\n def push_model(self, request, _):\n with self._lock:\n self._parameters.init_from_model_pb(request)\n return empty_pb2.Empty()\n\n def push_gradient(self, request, _):\n # TODO: implement this RPC service\n return elasticdl_pb2.PushGradientResponse()\n", "path": "elasticdl/python/ps/servicer.py"}]} | 1,144 | 255 |
gh_patches_debug_37115 | rasdani/github-patches | git_diff | SCons__scons-3697 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CompilationDatabase tool emits a field called `target` which should be called `output`
**Describe the bug**
With the new integrated SCons compilation database support, there is a field called `target` in the resulting file:
```
"target": "build/cached/third_party/s2/base/int128.dyn.o"
```
Per the compilation database documentation, it seems this field should instead be called `output`:
https://clang.llvm.org/docs/JSONCompilationDatabase.html#format
**Required information**
* Link to SCons Users thread discussing your issue.
Discussed directly with @bdbaddog
* Version of SCons
master
* Version of Python
3.7
* Which python distribution if applicable (python.org, cygwin, anaconda, macports, brew, etc.)
* How you installed SCons
Using it from source
* What Platform are you on? (Linux/Windows and which version)
macOS, but it shouldn't matter.
</issue>
<code>
[start of SCons/Tool/compilation_db.py]
1 """
2 Implements the ability for SCons to emit a compilation database for the MongoDB project. See
3 http://clang.llvm.org/docs/JSONCompilationDatabase.html for details on what a compilation
4 database is, and why you might want one. The only user visible entry point here is
5 'env.CompilationDatabase'. This method takes an optional 'target' to name the file that
6 should hold the compilation database, otherwise, the file defaults to compile_commands.json,
7 which is the name that most clang tools search for by default.
8 """
9
10 # Copyright 2020 MongoDB Inc.
11 #
12 # Permission is hereby granted, free of charge, to any person obtaining
13 # a copy of this software and associated documentation files (the
14 # "Software"), to deal in the Software without restriction, including
15 # without limitation the rights to use, copy, modify, merge, publish,
16 # distribute, sublicense, and/or sell copies of the Software, and to
17 # permit persons to whom the Software is furnished to do so, subject to
18 # the following conditions:
19 #
20 # The above copyright notice and this permission notice shall be included
21 # in all copies or substantial portions of the Software.
22 #
23 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
24 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
25 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
27 # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
28 # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
29 # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
30 #
31
32 import json
33 import itertools
34 import SCons
35
36 from .cxx import CXXSuffixes
37 from .cc import CSuffixes
38 from .asm import ASSuffixes, ASPPSuffixes
39
40 # TODO: Is there a better way to do this than this global? Right now this exists so that the
41 # emitter we add can record all of the things it emits, so that the scanner for the top level
42 # compilation database can access the complete list, and also so that the writer has easy
43 # access to write all of the files. But it seems clunky. How can the emitter and the scanner
44 # communicate more gracefully?
45 __COMPILATION_DB_ENTRIES = []
46
47
48 # We make no effort to avoid rebuilding the entries. Someday, perhaps we could and even
49 # integrate with the cache, but there doesn't seem to be much call for it.
50 class __CompilationDbNode(SCons.Node.Python.Value):
51 def __init__(self, value):
52 SCons.Node.Python.Value.__init__(self, value)
53 self.Decider(changed_since_last_build_node)
54
55
56 def changed_since_last_build_node(child, target, prev_ni, node):
57 """ Dummy decider to force always building"""
58 return True
59
60
61 def make_emit_compilation_DB_entry(comstr):
62 """
63 Effectively this creates a lambda function to capture:
64 * command line
65 * source
66 * target
67 :param comstr: unevaluated command line
68 :return: an emitter which has captured the above
69 """
70 user_action = SCons.Action.Action(comstr)
71
72 def emit_compilation_db_entry(target, source, env):
73 """
74 This emitter will be added to each c/c++ object build to capture the info needed
75 for clang tools
76 :param target: target node(s)
77 :param source: source node(s)
78 :param env: Environment for use building this node
79 :return: target(s), source(s)
80 """
81
82 dbtarget = __CompilationDbNode(source)
83
84 entry = env.__COMPILATIONDB_Entry(
85 target=dbtarget,
86 source=[],
87 __COMPILATIONDB_UTARGET=target,
88 __COMPILATIONDB_USOURCE=source,
89 __COMPILATIONDB_UACTION=user_action,
90 __COMPILATIONDB_ENV=env,
91 )
92
93 # TODO: Technically, these next two lines should not be required: it should be fine to
94 # cache the entries. However, they don't seem to update properly. Since they are quick
95 # to re-generate disable caching and sidestep this problem.
96 env.AlwaysBuild(entry)
97 env.NoCache(entry)
98
99 __COMPILATION_DB_ENTRIES.append(dbtarget)
100
101 return target, source
102
103 return emit_compilation_db_entry
104
105
106 def compilation_db_entry_action(target, source, env, **kw):
107 """
108 Create a dictionary with evaluated command line, target, source
109 and store that info as an attribute on the target
110 (Which has been stored in __COMPILATION_DB_ENTRIES array
111 :param target: target node(s)
112 :param source: source node(s)
113 :param env: Environment for use building this node
114 :param kw:
115 :return: None
116 """
117
118 command = env["__COMPILATIONDB_UACTION"].strfunction(
119 target=env["__COMPILATIONDB_UTARGET"],
120 source=env["__COMPILATIONDB_USOURCE"],
121 env=env["__COMPILATIONDB_ENV"],
122 )
123
124 entry = {
125 "directory": env.Dir("#").abspath,
126 "command": command,
127 "file": env["__COMPILATIONDB_USOURCE"][0],
128 "target": env['__COMPILATIONDB_UTARGET'][0]
129 }
130
131 target[0].write(entry)
132
133
134 def write_compilation_db(target, source, env):
135 entries = []
136
137 use_abspath = env['COMPILATIONDB_USE_ABSPATH'] in [True, 1, 'True', 'true']
138
139 for s in __COMPILATION_DB_ENTRIES:
140 entry = s.read()
141 source_file = entry['file']
142 target_file = entry['target']
143
144 if use_abspath:
145 source_file = source_file.abspath
146 target_file = target_file.abspath
147 else:
148 source_file = source_file.path
149 target_file = target_file.path
150
151 path_entry = {'directory': entry['directory'],
152 'command': entry['command'],
153 'file': source_file,
154 'target': target_file}
155
156 entries.append(path_entry)
157
158 with open(target[0].path, "w") as target_file:
159 json.dump(
160 entries, target_file, sort_keys=True, indent=4, separators=(",", ": ")
161 )
162
163
164 def scan_compilation_db(node, env, path):
165 return __COMPILATION_DB_ENTRIES
166
167
168 def compilation_db_emitter(target, source, env):
169 """ fix up the source/targets """
170
171 # Someone called env.CompilationDatabase('my_targetname.json')
172 if not target and len(source) == 1:
173 target = source
174
175 # Default target name is compilation_db.json
176 if not target:
177 target = ['compile_commands.json', ]
178
179 # No source should have been passed. Drop it.
180 if source:
181 source = []
182
183 return target, source
184
185
186 def generate(env, **kwargs):
187 static_obj, shared_obj = SCons.Tool.createObjBuilders(env)
188
189 env["COMPILATIONDB_COMSTR"] = kwargs.get(
190 "COMPILATIONDB_COMSTR", "Building compilation database $TARGET"
191 )
192
193 components_by_suffix = itertools.chain(
194 itertools.product(
195 CSuffixes,
196 [
197 (static_obj, SCons.Defaults.StaticObjectEmitter, "$CCCOM"),
198 (shared_obj, SCons.Defaults.SharedObjectEmitter, "$SHCCCOM"),
199 ],
200 ),
201 itertools.product(
202 CXXSuffixes,
203 [
204 (static_obj, SCons.Defaults.StaticObjectEmitter, "$CXXCOM"),
205 (shared_obj, SCons.Defaults.SharedObjectEmitter, "$SHCXXCOM"),
206 ],
207 ),
208 itertools.product(
209 ASSuffixes,
210 [(static_obj, SCons.Defaults.StaticObjectEmitter, "$ASCOM")],
211 [(shared_obj, SCons.Defaults.SharedObjectEmitter, "$ASCOM")],
212 ),
213 itertools.product(
214 ASPPSuffixes,
215 [(static_obj, SCons.Defaults.StaticObjectEmitter, "$ASPPCOM")],
216 [(shared_obj, SCons.Defaults.SharedObjectEmitter, "$ASPPCOM")],
217 ),
218 )
219
220 for entry in components_by_suffix:
221 suffix = entry[0]
222 builder, base_emitter, command = entry[1]
223
224 # Assumes a dictionary emitter
225 emitter = builder.emitter.get(suffix, False)
226 if emitter:
227 # We may not have tools installed which initialize all or any of
228 # cxx, cc, or assembly. If not skip resetting the respective emitter.
229 builder.emitter[suffix] = SCons.Builder.ListEmitter(
230 [emitter, make_emit_compilation_DB_entry(command), ]
231 )
232
233 env["BUILDERS"]["__COMPILATIONDB_Entry"] = SCons.Builder.Builder(
234 action=SCons.Action.Action(compilation_db_entry_action, None),
235 )
236
237 env["BUILDERS"]["CompilationDatabase"] = SCons.Builder.Builder(
238 action=SCons.Action.Action(write_compilation_db, "$COMPILATIONDB_COMSTR"),
239 target_scanner=SCons.Scanner.Scanner(
240 function=scan_compilation_db, node_class=None
241 ),
242 emitter=compilation_db_emitter,
243 suffix='json',
244 )
245
246 env['COMPILATIONDB_USE_ABSPATH'] = False
247
248
249 def exists(env):
250 return True
251
[end of SCons/Tool/compilation_db.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/SCons/Tool/compilation_db.py b/SCons/Tool/compilation_db.py
--- a/SCons/Tool/compilation_db.py
+++ b/SCons/Tool/compilation_db.py
@@ -84,7 +84,7 @@
entry = env.__COMPILATIONDB_Entry(
target=dbtarget,
source=[],
- __COMPILATIONDB_UTARGET=target,
+ __COMPILATIONDB_UOUTPUT=target,
__COMPILATIONDB_USOURCE=source,
__COMPILATIONDB_UACTION=user_action,
__COMPILATIONDB_ENV=env,
@@ -116,7 +116,7 @@
"""
command = env["__COMPILATIONDB_UACTION"].strfunction(
- target=env["__COMPILATIONDB_UTARGET"],
+ target=env["__COMPILATIONDB_UOUTPUT"],
source=env["__COMPILATIONDB_USOURCE"],
env=env["__COMPILATIONDB_ENV"],
)
@@ -125,7 +125,7 @@
"directory": env.Dir("#").abspath,
"command": command,
"file": env["__COMPILATIONDB_USOURCE"][0],
- "target": env['__COMPILATIONDB_UTARGET'][0]
+ "output": env['__COMPILATIONDB_UOUTPUT'][0]
}
target[0].write(entry)
@@ -139,25 +139,25 @@
for s in __COMPILATION_DB_ENTRIES:
entry = s.read()
source_file = entry['file']
- target_file = entry['target']
+ output_file = entry['output']
if use_abspath:
source_file = source_file.abspath
- target_file = target_file.abspath
+ output_file = output_file.abspath
else:
source_file = source_file.path
- target_file = target_file.path
+ output_file = output_file.path
path_entry = {'directory': entry['directory'],
'command': entry['command'],
'file': source_file,
- 'target': target_file}
+ 'output': output_file}
entries.append(path_entry)
- with open(target[0].path, "w") as target_file:
+ with open(target[0].path, "w") as output_file:
json.dump(
- entries, target_file, sort_keys=True, indent=4, separators=(",", ": ")
+ entries, output_file, sort_keys=True, indent=4, separators=(",", ": ")
)
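With the rename above applied, each compilation database entry carries an `output` field instead of `target`. An illustrative entry, written as the Python dict that `write_compilation_db` assembles per object file; the directory, command and paths are made up:

```python
# Illustrative compile_commands.json entry after the rename; values are invented.
example_entry = {
    "directory": "/home/user/project",
    "command": "gcc -o build/foo.o -c src/foo.c",
    "file": "src/foo.c",
    # Previously emitted as "target"; the Clang JSON Compilation Database format
    # linked in the issue names this field "output".
    "output": "build/foo.o",
}
```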
| {"golden_diff": "diff --git a/SCons/Tool/compilation_db.py b/SCons/Tool/compilation_db.py\n--- a/SCons/Tool/compilation_db.py\n+++ b/SCons/Tool/compilation_db.py\n@@ -84,7 +84,7 @@\n entry = env.__COMPILATIONDB_Entry(\n target=dbtarget,\n source=[],\n- __COMPILATIONDB_UTARGET=target,\n+ __COMPILATIONDB_UOUTPUT=target,\n __COMPILATIONDB_USOURCE=source,\n __COMPILATIONDB_UACTION=user_action,\n __COMPILATIONDB_ENV=env,\n@@ -116,7 +116,7 @@\n \"\"\"\n \n command = env[\"__COMPILATIONDB_UACTION\"].strfunction(\n- target=env[\"__COMPILATIONDB_UTARGET\"],\n+ target=env[\"__COMPILATIONDB_UOUTPUT\"],\n source=env[\"__COMPILATIONDB_USOURCE\"],\n env=env[\"__COMPILATIONDB_ENV\"],\n )\n@@ -125,7 +125,7 @@\n \"directory\": env.Dir(\"#\").abspath,\n \"command\": command,\n \"file\": env[\"__COMPILATIONDB_USOURCE\"][0],\n- \"target\": env['__COMPILATIONDB_UTARGET'][0]\n+ \"output\": env['__COMPILATIONDB_UOUTPUT'][0]\n }\n \n target[0].write(entry)\n@@ -139,25 +139,25 @@\n for s in __COMPILATION_DB_ENTRIES:\n entry = s.read()\n source_file = entry['file']\n- target_file = entry['target']\n+ output_file = entry['output']\n \n if use_abspath:\n source_file = source_file.abspath\n- target_file = target_file.abspath\n+ output_file = output_file.abspath\n else:\n source_file = source_file.path\n- target_file = target_file.path\n+ output_file = output_file.path\n \n path_entry = {'directory': entry['directory'],\n 'command': entry['command'],\n 'file': source_file,\n- 'target': target_file}\n+ 'output': output_file}\n \n entries.append(path_entry)\n \n- with open(target[0].path, \"w\") as target_file:\n+ with open(target[0].path, \"w\") as output_file:\n json.dump(\n- entries, target_file, sort_keys=True, indent=4, separators=(\",\", \": \")\n+ entries, output_file, sort_keys=True, indent=4, separators=(\",\", \": \")\n )\n", "issue": "CompilationDatabase tool emits a field called `target` which should be called `output`\n**Describe the bug**\r\nWith the new integrated SCons compilation database support, there is a field called `target` in the resulting file:\r\n\r\n```\r\n\"target\": \"build/cached/third_party/s2/base/int128.dyn.o\"\r\n```\r\n\r\nPer the compilation database documentation, it seems this field should instead be called `output`:\r\n\r\nhttps://clang.llvm.org/docs/JSONCompilationDatabase.html#format\r\n\r\n\r\n**Required information**\r\n* Link to SCons Users thread discussing your issue.\r\nDiscussed directly with @bdbaddog \r\n\r\n* Version of SCons\r\nmaster\r\n\r\n* Version of Python\r\n3.7\r\n\r\n* Which python distribution if applicable (python.org, cygwin, anaconda, macports, brew,etc)\r\n* How you installed SCons\r\nUsing it from source\r\n\r\n* What Platform are you on? (Linux/Windows and which version)\r\nmacOS, but it shouldn't matter.\r\n\n", "before_files": [{"content": "\"\"\"\nImplements the ability for SCons to emit a compilation database for the MongoDB project. See\nhttp://clang.llvm.org/docs/JSONCompilationDatabase.html for details on what a compilation\ndatabase is, and why you might want one. The only user visible entry point here is\n'env.CompilationDatabase'. 
This method takes an optional 'target' to name the file that\nshould hold the compilation database, otherwise, the file defaults to compile_commands.json,\nwhich is the name that most clang tools search for by default.\n\"\"\"\n\n# Copyright 2020 MongoDB Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\n# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\n# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n#\n\nimport json\nimport itertools\nimport SCons\n\nfrom .cxx import CXXSuffixes\nfrom .cc import CSuffixes\nfrom .asm import ASSuffixes, ASPPSuffixes\n\n# TODO: Is there a better way to do this than this global? Right now this exists so that the\n# emitter we add can record all of the things it emits, so that the scanner for the top level\n# compilation database can access the complete list, and also so that the writer has easy\n# access to write all of the files. But it seems clunky. How can the emitter and the scanner\n# communicate more gracefully?\n__COMPILATION_DB_ENTRIES = []\n\n\n# We make no effort to avoid rebuilding the entries. Someday, perhaps we could and even\n# integrate with the cache, but there doesn't seem to be much call for it.\nclass __CompilationDbNode(SCons.Node.Python.Value):\n def __init__(self, value):\n SCons.Node.Python.Value.__init__(self, value)\n self.Decider(changed_since_last_build_node)\n\n\ndef changed_since_last_build_node(child, target, prev_ni, node):\n \"\"\" Dummy decider to force always building\"\"\"\n return True\n\n\ndef make_emit_compilation_DB_entry(comstr):\n \"\"\"\n Effectively this creates a lambda function to capture:\n * command line\n * source\n * target\n :param comstr: unevaluated command line\n :return: an emitter which has captured the above\n \"\"\"\n user_action = SCons.Action.Action(comstr)\n\n def emit_compilation_db_entry(target, source, env):\n \"\"\"\n This emitter will be added to each c/c++ object build to capture the info needed\n for clang tools\n :param target: target node(s)\n :param source: source node(s)\n :param env: Environment for use building this node\n :return: target(s), source(s)\n \"\"\"\n\n dbtarget = __CompilationDbNode(source)\n\n entry = env.__COMPILATIONDB_Entry(\n target=dbtarget,\n source=[],\n __COMPILATIONDB_UTARGET=target,\n __COMPILATIONDB_USOURCE=source,\n __COMPILATIONDB_UACTION=user_action,\n __COMPILATIONDB_ENV=env,\n )\n\n # TODO: Technically, these next two lines should not be required: it should be fine to\n # cache the entries. However, they don't seem to update properly. 
Since they are quick\n # to re-generate disable caching and sidestep this problem.\n env.AlwaysBuild(entry)\n env.NoCache(entry)\n\n __COMPILATION_DB_ENTRIES.append(dbtarget)\n\n return target, source\n\n return emit_compilation_db_entry\n\n\ndef compilation_db_entry_action(target, source, env, **kw):\n \"\"\"\n Create a dictionary with evaluated command line, target, source\n and store that info as an attribute on the target\n (Which has been stored in __COMPILATION_DB_ENTRIES array\n :param target: target node(s)\n :param source: source node(s)\n :param env: Environment for use building this node\n :param kw:\n :return: None\n \"\"\"\n\n command = env[\"__COMPILATIONDB_UACTION\"].strfunction(\n target=env[\"__COMPILATIONDB_UTARGET\"],\n source=env[\"__COMPILATIONDB_USOURCE\"],\n env=env[\"__COMPILATIONDB_ENV\"],\n )\n\n entry = {\n \"directory\": env.Dir(\"#\").abspath,\n \"command\": command,\n \"file\": env[\"__COMPILATIONDB_USOURCE\"][0],\n \"target\": env['__COMPILATIONDB_UTARGET'][0]\n }\n\n target[0].write(entry)\n\n\ndef write_compilation_db(target, source, env):\n entries = []\n\n use_abspath = env['COMPILATIONDB_USE_ABSPATH'] in [True, 1, 'True', 'true']\n\n for s in __COMPILATION_DB_ENTRIES:\n entry = s.read()\n source_file = entry['file']\n target_file = entry['target']\n\n if use_abspath:\n source_file = source_file.abspath\n target_file = target_file.abspath\n else:\n source_file = source_file.path\n target_file = target_file.path\n\n path_entry = {'directory': entry['directory'],\n 'command': entry['command'],\n 'file': source_file,\n 'target': target_file}\n\n entries.append(path_entry)\n\n with open(target[0].path, \"w\") as target_file:\n json.dump(\n entries, target_file, sort_keys=True, indent=4, separators=(\",\", \": \")\n )\n\n\ndef scan_compilation_db(node, env, path):\n return __COMPILATION_DB_ENTRIES\n\n\ndef compilation_db_emitter(target, source, env):\n \"\"\" fix up the source/targets \"\"\"\n\n # Someone called env.CompilationDatabase('my_targetname.json')\n if not target and len(source) == 1:\n target = source\n\n # Default target name is compilation_db.json\n if not target:\n target = ['compile_commands.json', ]\n\n # No source should have been passed. Drop it.\n if source:\n source = []\n\n return target, source\n\n\ndef generate(env, **kwargs):\n static_obj, shared_obj = SCons.Tool.createObjBuilders(env)\n\n env[\"COMPILATIONDB_COMSTR\"] = kwargs.get(\n \"COMPILATIONDB_COMSTR\", \"Building compilation database $TARGET\"\n )\n\n components_by_suffix = itertools.chain(\n itertools.product(\n CSuffixes,\n [\n (static_obj, SCons.Defaults.StaticObjectEmitter, \"$CCCOM\"),\n (shared_obj, SCons.Defaults.SharedObjectEmitter, \"$SHCCCOM\"),\n ],\n ),\n itertools.product(\n CXXSuffixes,\n [\n (static_obj, SCons.Defaults.StaticObjectEmitter, \"$CXXCOM\"),\n (shared_obj, SCons.Defaults.SharedObjectEmitter, \"$SHCXXCOM\"),\n ],\n ),\n itertools.product(\n ASSuffixes,\n [(static_obj, SCons.Defaults.StaticObjectEmitter, \"$ASCOM\")],\n [(shared_obj, SCons.Defaults.SharedObjectEmitter, \"$ASCOM\")],\n ),\n itertools.product(\n ASPPSuffixes,\n [(static_obj, SCons.Defaults.StaticObjectEmitter, \"$ASPPCOM\")],\n [(shared_obj, SCons.Defaults.SharedObjectEmitter, \"$ASPPCOM\")],\n ),\n )\n\n for entry in components_by_suffix:\n suffix = entry[0]\n builder, base_emitter, command = entry[1]\n\n # Assumes a dictionary emitter\n emitter = builder.emitter.get(suffix, False)\n if emitter:\n # We may not have tools installed which initialize all or any of\n # cxx, cc, or assembly. 
If not skip resetting the respective emitter.\n builder.emitter[suffix] = SCons.Builder.ListEmitter(\n [emitter, make_emit_compilation_DB_entry(command), ]\n )\n\n env[\"BUILDERS\"][\"__COMPILATIONDB_Entry\"] = SCons.Builder.Builder(\n action=SCons.Action.Action(compilation_db_entry_action, None),\n )\n\n env[\"BUILDERS\"][\"CompilationDatabase\"] = SCons.Builder.Builder(\n action=SCons.Action.Action(write_compilation_db, \"$COMPILATIONDB_COMSTR\"),\n target_scanner=SCons.Scanner.Scanner(\n function=scan_compilation_db, node_class=None\n ),\n emitter=compilation_db_emitter,\n suffix='json',\n )\n\n env['COMPILATIONDB_USE_ABSPATH'] = False\n\n\ndef exists(env):\n return True\n", "path": "SCons/Tool/compilation_db.py"}]} | 3,403 | 563 |
gh_patches_debug_40818 | rasdani/github-patches | git_diff | UTNkar__moore-195 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use person number for appoint->overturn field
<!-- Do you want to ask a question? Are you looking for support? The system administrator can help you: [email protected] -->
### Prerequisites
* [X] Put an X between the brackets on this line if you have done all of the
following:
* Reproduced the problem with clear cache.
  * (If running the application locally:) Made sure you're running the newest version on the development branch
* Checked that your issue isn't already filed: https://github.com/UTNkar/moore/issues
### Description
Currently the `appoint` view of `Position` has an `overturn` field that works with usernames. The username(s) entered will be appointed to the position, no matter if they applied or not.
_Problem_: Within the old system a similar feature was available, but it was by person number. According to the management team this is preferable.
### Steps to Reproduce
1. Create new `Position` with past deadline
2. Click `appoint` in the overview
<!-- Please select the appropriate "topic category"/blue and "issue type"/yellow label -->
</issue>
<code>
[start of src/involvement/forms.py]
1 from django import forms
2 from django.contrib.auth import get_user_model
3 from django.utils.translation import ugettext_lazy as _
4
5 from involvement.models import Application, Reference
6 from utils.forms import AdvancedModelMultipleChoiceField
7
8
9 class ApplicationForm(forms.ModelForm):
10 class Meta:
11 model = Application
12 exclude = ['position', 'applicant']
13 widgets = {
14 'cover_letter': forms.Textarea(attrs={'style': 'height: 200px',
15 'class': 'form-control'}),
16 'qualifications': forms.Textarea(attrs={'style': 'height: 200px',
17 'class': 'form-control'}),
18 }
19
20 def clean_status(self):
21 status = self.cleaned_data['status']
22 if status not in ['draft', 'submitted'] \
23 or (self.initial['status'] == 'submitted'
24 and status == 'draft'):
25 raise forms.ValidationError(_('The submitted status was invalid.'))
26 return status
27
28
29 ReferenceFormSet = forms.inlineformset_factory(
30 Application,
31 Reference,
32 fields=('name', 'position', 'email', 'phone_number', 'comment'),
33 widgets={
34 'name': forms.TextInput(attrs={'class': 'form-control'}),
35 'position': forms.TextInput(attrs={'class': 'form-control'}),
36 'email': forms.TextInput(attrs={'class': 'form-control'}),
37 'phone_number': forms.TextInput(attrs={'class': 'form-control'}),
38 'comment': forms.TextInput(attrs={'class': 'form-control'}),
39 },
40 extra=0,
41 )
42
43
44 class ApprovalForm(forms.ModelForm):
45 status = forms.ChoiceField(
46 choices=(
47 ('submitted', '---------'),
48 ('approved', _('Approved')),
49 ('disapproved', _('Disapproved')),
50 ),
51 )
52
53 class Meta:
54 model = Application
55 fields = []
56
57 def clean_status(self):
58 status = self.cleaned_data['status']
59 if status not in ['submitted', 'approved', 'disapproved']:
60 raise forms.ValidationError(_('The submitted status was invalid.'))
61 return status
62
63 def save(self, commit=True):
64 self.instance.status = self.cleaned_data['status']
65
66 super(ApprovalForm, self).save(commit)
67
68
69 class AppointmentForm(forms.Form):
70 appoint = AdvancedModelMultipleChoiceField(
71 Application.objects.none(),
72 widget=forms.CheckboxSelectMultiple(),
73 required=False,
74 )
75 overturn = forms.CharField(
76 required=False,
77 label=_('Overturn'),
78 help_text=_('Enter a comma separated list of users you want to '
79 'appoint to the position, even though did not apply for '
80 'the position.')
81 )
82
83 def __init__(self, position, *args, **kwargs):
84 super(AppointmentForm, self).__init__(*args, **kwargs)
85 self.position = position
86 self.fields['appoint'].queryset = position.applications.filter(
87 status__in=['submitted', 'approved', 'appointed', 'turned_down']
88 )
89 self.initial['appoint'] = position.applications.filter(
90 status='appointed'
91 )
92
93 def clean_overturn(self):
94 string = self.cleaned_data['overturn']
95 string = string.replace(' ', '')
96 if string == '':
97 return []
98 else:
99 users = string.split(',')
100 for u in users:
101 if not get_user_model().objects.filter(
102 username=u
103 ).exists():
104 raise forms.ValidationError(
105 _('No user with the username %(user)s exists.'),
106 params={'user': u},
107 )
108 elif self.position.applications.filter(
109 applicant__username=u
110 ).exclude(
111 status='draft'
112 ).exists():
113 raise forms.ValidationError(
114 _('User %(user)s already applied for this position '
115 'and can not be appointed through the overturn '
116 'field.'),
117 params={'user': u},
118 )
119 return users
120
121 def clean(self):
122 super(AppointmentForm, self).clean()
123 appoint = self.cleaned_data.get('appoint', [])
124 overturn = self.cleaned_data.get('overturn', [])
125 nr_appointment = len(appoint) + len(overturn)
126 if nr_appointment > self.position.appointments:
127 raise forms.ValidationError(
128 _('You cannot appoint %(current)s applicants. The maximum '
129 'for this position is %(max)s.'),
130 params={
131 'current': nr_appointment,
132 'max': self.position.appointments,
133 },
134 )
135 return self.cleaned_data
136
137 def save(self):
138 for application in self.fields['appoint'].queryset:
139 if application in self.cleaned_data['appoint']:
140 application.status = 'appointed'
141 else:
142 application.status = 'turned_down'
143 application.save()
144
145 for user in self.cleaned_data['overturn']:
146 user = get_user_model().objects.get(
147 username=user
148 )
149 appl, created = Application.objects.get_or_create(
150 position=self.position,
151 applicant=user,
152 defaults={'status': 'appointed'}
153 )
154 if not created:
155 appl.status = 'appointed'
156 appl.save()
157
[end of src/involvement/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/involvement/forms.py b/src/involvement/forms.py
--- a/src/involvement/forms.py
+++ b/src/involvement/forms.py
@@ -3,6 +3,7 @@
from django.utils.translation import ugettext_lazy as _
from involvement.models import Application, Reference
+from members.forms import PersonNumberField
from utils.forms import AdvancedModelMultipleChoiceField
@@ -75,9 +76,9 @@
overturn = forms.CharField(
required=False,
label=_('Overturn'),
- help_text=_('Enter a comma separated list of users you want to '
- 'appoint to the position, even though did not apply for '
- 'the position.')
+ help_text=_('Enter a comma separated list of person numbers you want '
+ 'to appoint to the position, even though did not apply for'
+ ' the position.')
)
def __init__(self, position, *args, **kwargs):
@@ -96,26 +97,35 @@
if string == '':
return []
else:
- users = string.split(',')
- for u in users:
+ pnrs = string.split(',')
+ users = []
+ for pnr in pnrs:
+ date, number = PersonNumberField().to_python(pnr)
if not get_user_model().objects.filter(
- username=u
+ birthday=date,
+ person_number_ext=number,
).exists():
raise forms.ValidationError(
- _('No user with the username %(user)s exists.'),
- params={'user': u},
+ _('No user with the person number %(pnr)s exists.'),
+ params={'pnr': pnr},
)
elif self.position.applications.filter(
- applicant__username=u
+ applicant__birthday=date,
+ applicant__person_number_ext=number,
).exclude(
status='draft'
).exists():
raise forms.ValidationError(
- _('User %(user)s already applied for this position '
- 'and can not be appointed through the overturn '
- 'field.'),
- params={'user': u},
+ _('User with person number %(pnr)s already applied for'
+ ' this position and can not be appointed through the'
+ ' overturn field.'),
+ params={'pnr': pnr},
)
+ else:
+ users.append(get_user_model().objects.get(
+ birthday=date,
+ person_number_ext=number,
+ ))
return users
def clean(self):
@@ -143,9 +153,6 @@
application.save()
for user in self.cleaned_data['overturn']:
- user = get_user_model().objects.get(
- username=user
- )
appl, created = Application.objects.get_or_create(
position=self.position,
applicant=user,
| {"golden_diff": "diff --git a/src/involvement/forms.py b/src/involvement/forms.py\n--- a/src/involvement/forms.py\n+++ b/src/involvement/forms.py\n@@ -3,6 +3,7 @@\n from django.utils.translation import ugettext_lazy as _\n \n from involvement.models import Application, Reference\n+from members.forms import PersonNumberField\n from utils.forms import AdvancedModelMultipleChoiceField\n \n \n@@ -75,9 +76,9 @@\n overturn = forms.CharField(\n required=False,\n label=_('Overturn'),\n- help_text=_('Enter a comma separated list of users you want to '\n- 'appoint to the position, even though did not apply for '\n- 'the position.')\n+ help_text=_('Enter a comma separated list of person numbers you want '\n+ 'to appoint to the position, even though did not apply for'\n+ ' the position.')\n )\n \n def __init__(self, position, *args, **kwargs):\n@@ -96,26 +97,35 @@\n if string == '':\n return []\n else:\n- users = string.split(',')\n- for u in users:\n+ pnrs = string.split(',')\n+ users = []\n+ for pnr in pnrs:\n+ date, number = PersonNumberField().to_python(pnr)\n if not get_user_model().objects.filter(\n- username=u\n+ birthday=date,\n+ person_number_ext=number,\n ).exists():\n raise forms.ValidationError(\n- _('No user with the username %(user)s exists.'),\n- params={'user': u},\n+ _('No user with the person number %(pnr)s exists.'),\n+ params={'pnr': pnr},\n )\n elif self.position.applications.filter(\n- applicant__username=u\n+ applicant__birthday=date,\n+ applicant__person_number_ext=number,\n ).exclude(\n status='draft'\n ).exists():\n raise forms.ValidationError(\n- _('User %(user)s already applied for this position '\n- 'and can not be appointed through the overturn '\n- 'field.'),\n- params={'user': u},\n+ _('User with person number %(pnr)s already applied for'\n+ ' this position and can not be appointed through the'\n+ ' overturn field.'),\n+ params={'pnr': pnr},\n )\n+ else:\n+ users.append(get_user_model().objects.get(\n+ birthday=date,\n+ person_number_ext=number,\n+ ))\n return users\n \n def clean(self):\n@@ -143,9 +153,6 @@\n application.save()\n \n for user in self.cleaned_data['overturn']:\n- user = get_user_model().objects.get(\n- username=user\n- )\n appl, created = Application.objects.get_or_create(\n position=self.position,\n applicant=user,\n", "issue": "Use person number for appoint->overturn field\n<!-- Do you want to ask a question? Are you looking for support? The system administrator can help you: [email protected] -->\r\n\r\n### Prerequisites\r\n\r\n* [X] Put an X between the brackets on this line if you have done all of the\r\nfollowing:\r\n * Reproduced the problem with clear cache.\r\n * (If running the application locally:) Made sure your running the newest version on the development branch\r\n * Checked that your issue isn't already filed: https://github.com/UTNkar/moore/issues\r\n\r\n### Description\r\n\r\nCurrently the `overturn` field within the `appoint` view of `Position` has a overturn field that works with usernames. The username(s) entered will be appointed to the position, no matter if they applied or not.\r\n\r\n_Problem_: Within the old system a similar feature was available, but it was by person number. According to the management team this is preferable.\r\n\r\n### Steps to Reproduce\r\n\r\n1. Create new `Position` with past deadline\r\n2. 
Click `appoint` in the overview\r\n\r\n<!-- Please select the appropriate \"topic category\"/blue and \"issue type\"/yellow label -->\n", "before_files": [{"content": "from django import forms\nfrom django.contrib.auth import get_user_model\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom involvement.models import Application, Reference\nfrom utils.forms import AdvancedModelMultipleChoiceField\n\n\nclass ApplicationForm(forms.ModelForm):\n class Meta:\n model = Application\n exclude = ['position', 'applicant']\n widgets = {\n 'cover_letter': forms.Textarea(attrs={'style': 'height: 200px',\n 'class': 'form-control'}),\n 'qualifications': forms.Textarea(attrs={'style': 'height: 200px',\n 'class': 'form-control'}),\n }\n\n def clean_status(self):\n status = self.cleaned_data['status']\n if status not in ['draft', 'submitted'] \\\n or (self.initial['status'] == 'submitted'\n and status == 'draft'):\n raise forms.ValidationError(_('The submitted status was invalid.'))\n return status\n\n\nReferenceFormSet = forms.inlineformset_factory(\n Application,\n Reference,\n fields=('name', 'position', 'email', 'phone_number', 'comment'),\n widgets={\n 'name': forms.TextInput(attrs={'class': 'form-control'}),\n 'position': forms.TextInput(attrs={'class': 'form-control'}),\n 'email': forms.TextInput(attrs={'class': 'form-control'}),\n 'phone_number': forms.TextInput(attrs={'class': 'form-control'}),\n 'comment': forms.TextInput(attrs={'class': 'form-control'}),\n },\n extra=0,\n)\n\n\nclass ApprovalForm(forms.ModelForm):\n status = forms.ChoiceField(\n choices=(\n ('submitted', '---------'),\n ('approved', _('Approved')),\n ('disapproved', _('Disapproved')),\n ),\n )\n\n class Meta:\n model = Application\n fields = []\n\n def clean_status(self):\n status = self.cleaned_data['status']\n if status not in ['submitted', 'approved', 'disapproved']:\n raise forms.ValidationError(_('The submitted status was invalid.'))\n return status\n\n def save(self, commit=True):\n self.instance.status = self.cleaned_data['status']\n\n super(ApprovalForm, self).save(commit)\n\n\nclass AppointmentForm(forms.Form):\n appoint = AdvancedModelMultipleChoiceField(\n Application.objects.none(),\n widget=forms.CheckboxSelectMultiple(),\n required=False,\n )\n overturn = forms.CharField(\n required=False,\n label=_('Overturn'),\n help_text=_('Enter a comma separated list of users you want to '\n 'appoint to the position, even though did not apply for '\n 'the position.')\n )\n\n def __init__(self, position, *args, **kwargs):\n super(AppointmentForm, self).__init__(*args, **kwargs)\n self.position = position\n self.fields['appoint'].queryset = position.applications.filter(\n status__in=['submitted', 'approved', 'appointed', 'turned_down']\n )\n self.initial['appoint'] = position.applications.filter(\n status='appointed'\n )\n\n def clean_overturn(self):\n string = self.cleaned_data['overturn']\n string = string.replace(' ', '')\n if string == '':\n return []\n else:\n users = string.split(',')\n for u in users:\n if not get_user_model().objects.filter(\n username=u\n ).exists():\n raise forms.ValidationError(\n _('No user with the username %(user)s exists.'),\n params={'user': u},\n )\n elif self.position.applications.filter(\n applicant__username=u\n ).exclude(\n status='draft'\n ).exists():\n raise forms.ValidationError(\n _('User %(user)s already applied for this position '\n 'and can not be appointed through the overturn '\n 'field.'),\n params={'user': u},\n )\n return users\n\n def clean(self):\n 
super(AppointmentForm, self).clean()\n appoint = self.cleaned_data.get('appoint', [])\n overturn = self.cleaned_data.get('overturn', [])\n nr_appointment = len(appoint) + len(overturn)\n if nr_appointment > self.position.appointments:\n raise forms.ValidationError(\n _('You cannot appoint %(current)s applicants. The maximum '\n 'for this position is %(max)s.'),\n params={\n 'current': nr_appointment,\n 'max': self.position.appointments,\n },\n )\n return self.cleaned_data\n\n def save(self):\n for application in self.fields['appoint'].queryset:\n if application in self.cleaned_data['appoint']:\n application.status = 'appointed'\n else:\n application.status = 'turned_down'\n application.save()\n\n for user in self.cleaned_data['overturn']:\n user = get_user_model().objects.get(\n username=user\n )\n appl, created = Application.objects.get_or_create(\n position=self.position,\n applicant=user,\n defaults={'status': 'appointed'}\n )\n if not created:\n appl.status = 'appointed'\n appl.save()\n", "path": "src/involvement/forms.py"}]} | 2,186 | 610 |
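
The golden diff for this record imports a `PersonNumberField` from `members.forms`, which is not included in the record itself; the only behaviour the diff relies on is that `to_python()` yields a `(birth date, extension)` pair. A rough sketch of such a field, assuming person numbers written as `YYYYMMDD-XXXX` (the format handling and names here are assumptions, not the repository's actual implementation):

```python
# Hypothetical sketch of a PersonNumberField compatible with the diff above.
# Assumes input like "19900101-1234"; the real members.forms field may accept
# other formats and perform stricter validation.
from datetime import datetime

from django import forms


class PersonNumberField(forms.CharField):
    def to_python(self, value):
        value = (value or '').strip().replace(' ', '')
        try:
            date_part, ext = value.split('-')
            date = datetime.strptime(date_part, '%Y%m%d').date()
        except ValueError:
            raise forms.ValidationError('Enter a person number as YYYYMMDD-XXXX.')
        return date, ext
```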
gh_patches_debug_16671 | rasdani/github-patches | git_diff | CiviWiki__OpenCiviWiki-1116 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG]: FrontEnd routing issues
### Description
The header in the application routes the application incorrectly and needs to be fixed. If it routes the anchor tag incorrectly, we'll be having a nested routes tree of /page1/page2/...
### What should have happened?
Its end output must be like this
```html
<a class="dark-white-text menu-text" href="/howitworks">How CiviWiki Works</a>
```
instead of
```html
<a class="dark-white-text menu-text" href="howitworks">How CiviWiki Works</a>
```
### What browser(s) are you seeing the problem on?
All
</issue>
<code>
[start of project/frontend_views/urls.py]
1 from django.urls import path
2 from frontend_views import views
3
4 urlpatterns = [
5 path("about/", views.about_view, name="about"),
6 path("support_us/", views.support_us_view, name="support us"),
7 path("howitworks/", views.how_it_works_view, name="how it works"),
8 path("profile/<str:username>/", views.user_profile, name="profile"),
9 path("thread/<int:thread_id>/", views.issue_thread, name="issue thread"),
10 path("profile/", views.user_profile, name="default_profile"),
11 path("", views.base_view, name="base"),
12 path("thread/<int:thread_id>/csv/", views.civi2csv, name="civi2csv"),
13 ]
14
[end of project/frontend_views/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/project/frontend_views/urls.py b/project/frontend_views/urls.py
--- a/project/frontend_views/urls.py
+++ b/project/frontend_views/urls.py
@@ -3,10 +3,10 @@
urlpatterns = [
path("about/", views.about_view, name="about"),
- path("support_us/", views.support_us_view, name="support us"),
- path("howitworks/", views.how_it_works_view, name="how it works"),
+ path("support_us/", views.support_us_view, name="support_us"),
+ path("howitworks/", views.how_it_works_view, name="how_it_works"),
path("profile/<str:username>/", views.user_profile, name="profile"),
- path("thread/<int:thread_id>/", views.issue_thread, name="issue thread"),
+ path("thread/<int:thread_id>/", views.issue_thread, name="issue_thread"),
path("profile/", views.user_profile, name="default_profile"),
path("", views.base_view, name="base"),
path("thread/<int:thread_id>/csv/", views.civi2csv, name="civi2csv"),
| {"golden_diff": "diff --git a/project/frontend_views/urls.py b/project/frontend_views/urls.py\n--- a/project/frontend_views/urls.py\n+++ b/project/frontend_views/urls.py\n@@ -3,10 +3,10 @@\n \n urlpatterns = [\n path(\"about/\", views.about_view, name=\"about\"),\n- path(\"support_us/\", views.support_us_view, name=\"support us\"),\n- path(\"howitworks/\", views.how_it_works_view, name=\"how it works\"),\n+ path(\"support_us/\", views.support_us_view, name=\"support_us\"),\n+ path(\"howitworks/\", views.how_it_works_view, name=\"how_it_works\"),\n path(\"profile/<str:username>/\", views.user_profile, name=\"profile\"),\n- path(\"thread/<int:thread_id>/\", views.issue_thread, name=\"issue thread\"),\n+ path(\"thread/<int:thread_id>/\", views.issue_thread, name=\"issue_thread\"),\n path(\"profile/\", views.user_profile, name=\"default_profile\"),\n path(\"\", views.base_view, name=\"base\"),\n path(\"thread/<int:thread_id>/csv/\", views.civi2csv, name=\"civi2csv\"),\n", "issue": "[BUG]: FrontEnd routing issues\n### Description\r\n\r\nThe header in the application routes the application incorrectly and needs to be fixed. If it routes the anchor tag incorrectly, we'll be having a nested routes tree of /page1/page2/...\r\n\r\n\r\n\r\n### What should have happened?\r\nIts end output must be like this\r\n```html\r\n<a class=\"dark-white-text menu-text\" href=\"/howitworks\">How CiviWiki Works</a>\r\n```\r\ninstead of \r\n```html\r\n<a class=\"dark-white-text menu-text\" href=\"howitworks\">How CiviWiki Works</a>\r\n```\r\n\r\n### What browser(s) are you seeing the problem on?\r\nAll\r\n\n", "before_files": [{"content": "from django.urls import path\nfrom frontend_views import views\n\nurlpatterns = [\n path(\"about/\", views.about_view, name=\"about\"),\n path(\"support_us/\", views.support_us_view, name=\"support us\"),\n path(\"howitworks/\", views.how_it_works_view, name=\"how it works\"),\n path(\"profile/<str:username>/\", views.user_profile, name=\"profile\"),\n path(\"thread/<int:thread_id>/\", views.issue_thread, name=\"issue thread\"),\n path(\"profile/\", views.user_profile, name=\"default_profile\"),\n path(\"\", views.base_view, name=\"base\"),\n path(\"thread/<int:thread_id>/csv/\", views.civi2csv, name=\"civi2csv\"),\n]\n", "path": "project/frontend_views/urls.py"}]} | 851 | 254 |
gh_patches_debug_23139 | rasdani/github-patches | git_diff | Pyomo__pyomo-2740 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Deprecate the old 'pyomo' command syntax
In earlier releases, we supported commands like
` pyomo --solver=glpk foo.py bar.dat`
And we moved to a nested pyomo command, which uses the following syntax:
` pyomo solve --solver=glpk foo.py bar.dat`
However, to enable graceful transition for users, we added a hack to transition to the old syntax if it looks like the user is not specifying a valid subcommand. However, this leads to confusing error messages, and I think it's time to deprecate that hack and start throwing warnings that this hack will be removed in an upcoming release.
</issue>
<code>
[start of pyomo/scripting/pyomo_main.py]
1 # ___________________________________________________________________________
2 #
3 # Pyomo: Python Optimization Modeling Objects
4 # Copyright (c) 2008-2022
5 # National Technology and Engineering Solutions of Sandia, LLC
6 # Under the terms of Contract DE-NA0003525 with National Technology and
7 # Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
8 # rights in this software.
9 # This software is distributed under the 3-clause BSD License.
10 # ___________________________________________________________________________
11
12 import sys
13 import copy
14
15 try:
16 import pkg_resources
17
18 pyomo_commands = pkg_resources.iter_entry_points('pyomo.command')
19 except:
20 pyomo_commands = []
21 #
22 # Load modules associated with Plugins that are defined in
23 # EGG files.
24 #
25 for entrypoint in pyomo_commands:
26 try:
27 plugin_class = entrypoint.load()
28 except Exception:
29 exctype, err, tb = sys.exc_info() # BUG?
30 import traceback
31
32 msg = (
33 "Error loading pyomo.command entry point %s:\nOriginal %s: %s\n"
34 "Traceback:\n%s"
35 % (entrypoint, exctype.__name__, err, ''.join(traceback.format_tb(tb)))
36 )
37 # clear local variables to remove circular references
38 exctype = err = tb = None
39 # TODO: Should this just log an error and re-raise the original
40 # exception?
41 raise ImportError(msg)
42
43
44 def main(args=None):
45 #
46 # Load subcommands
47 #
48 from pyomo.scripting import pyomo_parser
49 import pyomo.environ
50
51 #
52 # Parse the arguments
53 #
54 parser = pyomo_parser.get_parser()
55 if args is None:
56 args = copy.copy(sys.argv[1:])
57 #
58 # This is a hack to convert a command-line to a 'solve' subcommand
59 #
60 if not args:
61 args.append('-h')
62 # FIXME: This should use the logger and not print()
63 if args[0][0] == '-':
64 if args[0] not in ['-h', '--help', '--version']:
65 print("WARNING: converting to the 'pyomo solve' subcommand")
66 args = ['solve'] + args[0:]
67 elif args[0] not in pyomo_parser.subparsers:
68 print("WARNING: converting to the 'pyomo solve' subcommand")
69 args = ['solve'] + args[0:]
70 #
71 # Process arguments
72 #
73 _options, _unparsed = parser.parse_known_args(args)
74 #
75 # Process the results
76 #
77 if _options.func.__code__.co_argcount == 1:
78 #
79 # If the execution function only accepts one argument, then we
80 # create an exception if there are unparsed arguments.
81 #
82 if len(_unparsed) > 0:
83 #
84 # Re-parse the command-line to create an exception
85 #
86 parser.parse_args(_unparsed)
87 retval = _options.func(_options)
88 else:
89 retval = _options.func(_options, _unparsed)
90 return retval
91
92
93 def main_console_script():
94 "This is the entry point for the main Pyomo script"
95 # Note that we eat the retval data structure and only return the
96 # process return code
97 ans = main()
98 try:
99 return ans.errorcode
100 except AttributeError:
101 return ans
102
103
104 if __name__ == '__main__':
105 sys.exit(main_console_script())
106
[end of pyomo/scripting/pyomo_main.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pyomo/scripting/pyomo_main.py b/pyomo/scripting/pyomo_main.py
--- a/pyomo/scripting/pyomo_main.py
+++ b/pyomo/scripting/pyomo_main.py
@@ -11,6 +11,7 @@
import sys
import copy
+from pyomo.common.deprecation import deprecation_warning
try:
import pkg_resources
@@ -59,13 +60,16 @@
#
if not args:
args.append('-h')
- # FIXME: This should use the logger and not print()
if args[0][0] == '-':
if args[0] not in ['-h', '--help', '--version']:
- print("WARNING: converting to the 'pyomo solve' subcommand")
+ deprecation_warning("Running the 'pyomo' script with no subcommand is deprecated. "
+ "Defaulting to 'pyomo solve'",
+ version='TBD')
args = ['solve'] + args[0:]
elif args[0] not in pyomo_parser.subparsers:
- print("WARNING: converting to the 'pyomo solve' subcommand")
+ deprecation_warning("Running the 'pyomo' script with no subcommand is deprecated. "
+ "Defaulting to 'pyomo solve'",
+ version='TBD')
args = ['solve'] + args[0:]
#
# Process arguments
| {"golden_diff": "diff --git a/pyomo/scripting/pyomo_main.py b/pyomo/scripting/pyomo_main.py\n--- a/pyomo/scripting/pyomo_main.py\n+++ b/pyomo/scripting/pyomo_main.py\n@@ -11,6 +11,7 @@\n \n import sys\n import copy\n+from pyomo.common.deprecation import deprecation_warning\n \n try:\n import pkg_resources\n@@ -59,13 +60,16 @@\n #\n if not args:\n args.append('-h')\n- # FIXME: This should use the logger and not print()\n if args[0][0] == '-':\n if args[0] not in ['-h', '--help', '--version']:\n- print(\"WARNING: converting to the 'pyomo solve' subcommand\")\n+ deprecation_warning(\"Running the 'pyomo' script with no subcommand is deprecated. \"\n+ \"Defaulting to 'pyomo solve'\",\n+ version='TBD')\n args = ['solve'] + args[0:]\n elif args[0] not in pyomo_parser.subparsers:\n- print(\"WARNING: converting to the 'pyomo solve' subcommand\")\n+ deprecation_warning(\"Running the 'pyomo' script with no subcommand is deprecated. \"\n+ \"Defaulting to 'pyomo solve'\",\n+ version='TBD')\n args = ['solve'] + args[0:]\n #\n # Process arguments\n", "issue": "Deprecate the old 'pyomo' command syntax\nIn earlier releases, we supported commands like\r\n` pyomo --solver=glpk foo.py bar.dat`\r\nAnd we moved to a nested pyomo command, which uses the following syntax:\r\n` pyomo solve --solver=glpk foo.py bar.dat`\r\n\r\nHowever, to enable graceful transition for users, we added a hack to transition to the old syntax if it looks like the user is not specifying a valid subcommand. However, this leads to confusing error messages, and I think it's time to deprecate that hack and start throwing warnings that this hack will be removed in an upcoming release.\n", "before_files": [{"content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright (c) 2008-2022\n# National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and\n# Engineering Solutions of Sandia, LLC, the U.S. 
Government retains certain\n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\n\nimport sys\nimport copy\n\ntry:\n import pkg_resources\n\n pyomo_commands = pkg_resources.iter_entry_points('pyomo.command')\nexcept:\n pyomo_commands = []\n#\n# Load modules associated with Plugins that are defined in\n# EGG files.\n#\nfor entrypoint in pyomo_commands:\n try:\n plugin_class = entrypoint.load()\n except Exception:\n exctype, err, tb = sys.exc_info() # BUG?\n import traceback\n\n msg = (\n \"Error loading pyomo.command entry point %s:\\nOriginal %s: %s\\n\"\n \"Traceback:\\n%s\"\n % (entrypoint, exctype.__name__, err, ''.join(traceback.format_tb(tb)))\n )\n # clear local variables to remove circular references\n exctype = err = tb = None\n # TODO: Should this just log an error and re-raise the original\n # exception?\n raise ImportError(msg)\n\n\ndef main(args=None):\n #\n # Load subcommands\n #\n from pyomo.scripting import pyomo_parser\n import pyomo.environ\n\n #\n # Parse the arguments\n #\n parser = pyomo_parser.get_parser()\n if args is None:\n args = copy.copy(sys.argv[1:])\n #\n # This is a hack to convert a command-line to a 'solve' subcommand\n #\n if not args:\n args.append('-h')\n # FIXME: This should use the logger and not print()\n if args[0][0] == '-':\n if args[0] not in ['-h', '--help', '--version']:\n print(\"WARNING: converting to the 'pyomo solve' subcommand\")\n args = ['solve'] + args[0:]\n elif args[0] not in pyomo_parser.subparsers:\n print(\"WARNING: converting to the 'pyomo solve' subcommand\")\n args = ['solve'] + args[0:]\n #\n # Process arguments\n #\n _options, _unparsed = parser.parse_known_args(args)\n #\n # Process the results\n #\n if _options.func.__code__.co_argcount == 1:\n #\n # If the execution function only accepts one argument, then we\n # create an exception if there are unparsed arguments.\n #\n if len(_unparsed) > 0:\n #\n # Re-parse the command-line to create an exception\n #\n parser.parse_args(_unparsed)\n retval = _options.func(_options)\n else:\n retval = _options.func(_options, _unparsed)\n return retval\n\n\ndef main_console_script():\n \"This is the entry point for the main Pyomo script\"\n # Note that we eat the retval data structure and only return the\n # process return code\n ans = main()\n try:\n return ans.errorcode\n except AttributeError:\n return ans\n\n\nif __name__ == '__main__':\n sys.exit(main_console_script())\n", "path": "pyomo/scripting/pyomo_main.py"}]} | 1,642 | 306 |
gh_patches_debug_13095 | rasdani/github-patches | git_diff | huggingface__accelerate-1960 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
patch_environment does not revert previously existing keys
### System Info
```Shell
does not matter here
```
### Information
- [ ] The official example scripts
- [ ] My own modified scripts
### Tasks
- [ ] One of the scripts in the examples/ folder of Accelerate or an officially supported `no_trainer` script in the `examples` folder of the `transformers` repo (such as `run_no_trainer_glue.py`)
- [ ] My own task or dataset (give details below)
### Reproduction
```python
os.environ["FOO"] = "BAR"
with patch_environment(FOO="BAZ"):
assert os.environ["FOO"] == "BAZ"
assert os.environ["F00"] == "BAR" # KeyError
```
### Expected behavior
After leaving the context, the initial environment variables should be restored.
I noticed this while going through the code. If this is an issue, LMK and I'll provide a fix + tests. If, in the context that this function is used, it's not a problem, feel free to close the issue.
</issue>
<code>
[start of src/accelerate/utils/other.py]
1 # Copyright 2022 The HuggingFace Team. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16 import socket
17 from contextlib import contextmanager
18 from types import MethodType
19
20 import torch
21
22 from ..commands.config.default import write_basic_config # noqa: F401
23 from ..state import PartialState
24 from .constants import FSDP_PYTORCH_VERSION
25 from .dataclasses import DistributedType
26 from .imports import is_deepspeed_available, is_tpu_available
27 from .transformer_engine import convert_model
28 from .versions import is_torch_version
29
30
31 if is_deepspeed_available():
32 from deepspeed import DeepSpeedEngine
33
34 if is_tpu_available(check_device=False):
35 import torch_xla.core.xla_model as xm
36
37
38 def is_compiled_module(module):
39 """
40 Check whether the module was compiled with torch.compile()
41 """
42 if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
43 return False
44 return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)
45
46
47 def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
48 """
49 Extract a model from its distributed containers.
50
51 Args:
52 model (`torch.nn.Module`):
53 The model to extract.
54 keep_fp32_wrapper (`bool`, *optional*):
55 Whether to remove mixed precision hooks from the model.
56
57 Returns:
58 `torch.nn.Module`: The extracted model.
59 """
60 options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
61
62 is_compiled = is_compiled_module(model)
63 if is_compiled:
64 compiled_model = model
65 model = model._orig_mod
66
67 if is_deepspeed_available():
68 options += (DeepSpeedEngine,)
69
70 if is_torch_version(">=", FSDP_PYTORCH_VERSION):
71 from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
72
73 options += (FSDP,)
74
75 while isinstance(model, options):
76 model = model.module
77
78 if not keep_fp32_wrapper:
79 forward = getattr(model, "forward")
80 original_forward = model.__dict__.pop("_original_forward", None)
81 if original_forward is not None:
82 while hasattr(forward, "__wrapped__"):
83 forward = forward.__wrapped__
84 if forward == original_forward:
85 break
86 model.forward = MethodType(forward, model)
87 if getattr(model, "_converted_to_transformer_engine", False):
88 convert_model(model, to_transformer_engine=False)
89
90 if is_compiled:
91 compiled_model._orig_mod = model
92 model = compiled_model
93
94 return model
95
96
97 def wait_for_everyone():
98 """
99 Introduces a blocking point in the script, making sure all processes have reached this point before continuing.
100
101 <Tip warning={true}>
102
103 Make sure all processes will reach this instruction otherwise one of your processes will hang forever.
104
105 </Tip>
106 """
107 PartialState().wait_for_everyone()
108
109
110 def save(obj, f):
111 """
112 Save the data to disk. Use in place of `torch.save()`.
113
114 Args:
115 obj: The data to save
116 f: The file (or file-like object) to use to save the data
117 """
118 if PartialState().distributed_type == DistributedType.TPU:
119 xm.save(obj, f)
120 elif PartialState().local_process_index == 0:
121 torch.save(obj, f)
122
123
124 @contextmanager
125 def clear_environment():
126 """
127 A context manager that will cache origin `os.environ` and replace it with a empty dictionary in this context.
128
129 When this context exits, the cached `os.environ` will be back.
130
131 Example:
132
133 ```python
134 >>> import os
135 >>> from accelerate.utils import clear_environment
136
137 >>> os.environ["FOO"] = "bar"
138 >>> with clear_environment():
139 ... print(os.environ)
140 ... os.environ["FOO"] = "new_bar"
141 ... print(os.environ["FOO"])
142 {}
143 new_bar
144
145 >>> print(os.environ["FOO"])
146 bar
147 ```
148 """
149 _old_os_environ = os.environ
150 os.environ = dict()
151
152 yield
153
154 os.environ = _old_os_environ
155
156
157 @contextmanager
158 def patch_environment(**kwargs):
159 """
160 A context manager that will add each keyword argument passed to `os.environ` and remove them when exiting.
161
162 Will convert the values in `kwargs` to strings and upper-case all the keys.
163
164 Example:
165
166 ```python
167 >>> import os
168 >>> from accelerate.utils import patch_environment
169
170 >>> with patch_environment(FOO="bar"):
171 ... print(os.environ["FOO"]) # prints "bar"
172 >>> print(os.environ["FOO"]) # raises KeyError
173 ```
174 """
175 for key, value in kwargs.items():
176 os.environ[key.upper()] = str(value)
177
178 yield
179
180 for key in kwargs:
181 if key.upper() in os.environ:
182 del os.environ[key.upper()]
183
184
185 def get_pretty_name(obj):
186 """
187 Gets a pretty name from `obj`.
188 """
189 if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
190 obj = getattr(obj, "__class__", obj)
191 if hasattr(obj, "__qualname__"):
192 return obj.__qualname__
193 if hasattr(obj, "__name__"):
194 return obj.__name__
195 return str(obj)
196
197
198 def merge_dicts(source, destination):
199 """
200 Recursively merges two dictionaries.
201
202 Args:
203 source (`dict`): The dictionary to merge into `destination`.
204 destination (`dict`): The dictionary to merge `source` into.
205 """
206 for key, value in source.items():
207 if isinstance(value, dict):
208 node = destination.setdefault(key, {})
209 merge_dicts(value, node)
210 else:
211 destination[key] = value
212
213 return destination
214
215
216 def is_port_in_use(port: int = None) -> bool:
217 """
218 Checks if a port is in use on `localhost`. Useful for checking if multiple `accelerate launch` commands have been
219 run and need to see if the port is already in use.
220 """
221 if port is None:
222 port = 29500
223 with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
224 return s.connect_ex(("localhost", port)) == 0
225
226
227 def convert_bytes(size):
228 "Converts `size` from bytes to the largest possible unit"
229 for x in ["bytes", "KB", "MB", "GB", "TB"]:
230 if size < 1024.0:
231 return f"{round(size, 2)} {x}"
232 size /= 1024.0
233
234 return f"{round(size, 2)} PB"
235
[end of src/accelerate/utils/other.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/accelerate/utils/other.py b/src/accelerate/utils/other.py
--- a/src/accelerate/utils/other.py
+++ b/src/accelerate/utils/other.py
@@ -172,14 +172,22 @@
>>> print(os.environ["FOO"]) # raises KeyError
```
"""
+ existing_vars = {}
for key, value in kwargs.items():
- os.environ[key.upper()] = str(value)
+ key = key.upper()
+ if key in os.environ:
+ existing_vars[key] = os.environ[key]
+ os.environ[key] = str(value)
yield
for key in kwargs:
- if key.upper() in os.environ:
- del os.environ[key.upper()]
+ key = key.upper()
+ if key in existing_vars:
+ # restore previous value
+ os.environ[key] = existing_vars[key]
+ else:
+ os.environ.pop(key, None)
def get_pretty_name(obj):
| {"golden_diff": "diff --git a/src/accelerate/utils/other.py b/src/accelerate/utils/other.py\n--- a/src/accelerate/utils/other.py\n+++ b/src/accelerate/utils/other.py\n@@ -172,14 +172,22 @@\n >>> print(os.environ[\"FOO\"]) # raises KeyError\n ```\n \"\"\"\n+ existing_vars = {}\n for key, value in kwargs.items():\n- os.environ[key.upper()] = str(value)\n+ key = key.upper()\n+ if key in os.environ:\n+ existing_vars[key] = os.environ[key]\n+ os.environ[key] = str(value)\n \n yield\n \n for key in kwargs:\n- if key.upper() in os.environ:\n- del os.environ[key.upper()]\n+ key = key.upper()\n+ if key in existing_vars:\n+ # restore previous value\n+ os.environ[key] = existing_vars[key]\n+ else:\n+ os.environ.pop(key, None)\n \n \n def get_pretty_name(obj):\n", "issue": "patch_environment does not revert previously existing keys\n### System Info\n\n```Shell\ndoes not matter here\n```\n\n\n### Information\n\n- [ ] The official example scripts\n- [ ] My own modified scripts\n\n### Tasks\n\n- [ ] One of the scripts in the examples/ folder of Accelerate or an officially supported `no_trainer` script in the `examples` folder of the `transformers` repo (such as `run_no_trainer_glue.py`)\n- [ ] My own task or dataset (give details below)\n\n### Reproduction\n\n```python\r\nos.environ[\"FOO\"] = \"BAR\"\r\nwith patch_environment(FOO=\"BAZ\"):\r\n assert os.environ[\"FOO\"] == \"BAZ\"\r\nassert os.environ[\"F00\"] == \"BAR\" # KeyError\r\n```\n\n### Expected behavior\n\nAfter leaving the context, the initial environment variables should be restored.\r\n\r\nI noticed this while going through the code. If this is an issue, LMK and I'll provide a fix + tests. If, in the context that this function is used, it's not a problem, feel free to close the issue.\n", "before_files": [{"content": "# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport socket\nfrom contextlib import contextmanager\nfrom types import MethodType\n\nimport torch\n\nfrom ..commands.config.default import write_basic_config # noqa: F401\nfrom ..state import PartialState\nfrom .constants import FSDP_PYTORCH_VERSION\nfrom .dataclasses import DistributedType\nfrom .imports import is_deepspeed_available, is_tpu_available\nfrom .transformer_engine import convert_model\nfrom .versions import is_torch_version\n\n\nif is_deepspeed_available():\n from deepspeed import DeepSpeedEngine\n\nif is_tpu_available(check_device=False):\n import torch_xla.core.xla_model as xm\n\n\ndef is_compiled_module(module):\n \"\"\"\n Check whether the module was compiled with torch.compile()\n \"\"\"\n if is_torch_version(\"<\", \"2.0.0\") or not hasattr(torch, \"_dynamo\"):\n return False\n return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)\n\n\ndef extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):\n \"\"\"\n Extract a model from its distributed containers.\n\n Args:\n model (`torch.nn.Module`):\n The model to extract.\n keep_fp32_wrapper (`bool`, *optional*):\n Whether to remove mixed precision hooks from the model.\n\n Returns:\n `torch.nn.Module`: The extracted model.\n \"\"\"\n options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)\n\n is_compiled = is_compiled_module(model)\n if is_compiled:\n compiled_model = model\n model = model._orig_mod\n\n if is_deepspeed_available():\n options += (DeepSpeedEngine,)\n\n if is_torch_version(\">=\", FSDP_PYTORCH_VERSION):\n from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP\n\n options += (FSDP,)\n\n while isinstance(model, options):\n model = model.module\n\n if not keep_fp32_wrapper:\n forward = getattr(model, \"forward\")\n original_forward = model.__dict__.pop(\"_original_forward\", None)\n if original_forward is not None:\n while hasattr(forward, \"__wrapped__\"):\n forward = forward.__wrapped__\n if forward == original_forward:\n break\n model.forward = MethodType(forward, model)\n if getattr(model, \"_converted_to_transformer_engine\", False):\n convert_model(model, to_transformer_engine=False)\n\n if is_compiled:\n compiled_model._orig_mod = model\n model = compiled_model\n\n return model\n\n\ndef wait_for_everyone():\n \"\"\"\n Introduces a blocking point in the script, making sure all processes have reached this point before continuing.\n\n <Tip warning={true}>\n\n Make sure all processes will reach this instruction otherwise one of your processes will hang forever.\n\n </Tip>\n \"\"\"\n PartialState().wait_for_everyone()\n\n\ndef save(obj, f):\n \"\"\"\n Save the data to disk. 
Use in place of `torch.save()`.\n\n Args:\n obj: The data to save\n f: The file (or file-like object) to use to save the data\n \"\"\"\n if PartialState().distributed_type == DistributedType.TPU:\n xm.save(obj, f)\n elif PartialState().local_process_index == 0:\n torch.save(obj, f)\n\n\n@contextmanager\ndef clear_environment():\n \"\"\"\n A context manager that will cache origin `os.environ` and replace it with a empty dictionary in this context.\n\n When this context exits, the cached `os.environ` will be back.\n\n Example:\n\n ```python\n >>> import os\n >>> from accelerate.utils import clear_environment\n\n >>> os.environ[\"FOO\"] = \"bar\"\n >>> with clear_environment():\n ... print(os.environ)\n ... os.environ[\"FOO\"] = \"new_bar\"\n ... print(os.environ[\"FOO\"])\n {}\n new_bar\n\n >>> print(os.environ[\"FOO\"])\n bar\n ```\n \"\"\"\n _old_os_environ = os.environ\n os.environ = dict()\n\n yield\n\n os.environ = _old_os_environ\n\n\n@contextmanager\ndef patch_environment(**kwargs):\n \"\"\"\n A context manager that will add each keyword argument passed to `os.environ` and remove them when exiting.\n\n Will convert the values in `kwargs` to strings and upper-case all the keys.\n\n Example:\n\n ```python\n >>> import os\n >>> from accelerate.utils import patch_environment\n\n >>> with patch_environment(FOO=\"bar\"):\n ... print(os.environ[\"FOO\"]) # prints \"bar\"\n >>> print(os.environ[\"FOO\"]) # raises KeyError\n ```\n \"\"\"\n for key, value in kwargs.items():\n os.environ[key.upper()] = str(value)\n\n yield\n\n for key in kwargs:\n if key.upper() in os.environ:\n del os.environ[key.upper()]\n\n\ndef get_pretty_name(obj):\n \"\"\"\n Gets a pretty name from `obj`.\n \"\"\"\n if not hasattr(obj, \"__qualname__\") and not hasattr(obj, \"__name__\"):\n obj = getattr(obj, \"__class__\", obj)\n if hasattr(obj, \"__qualname__\"):\n return obj.__qualname__\n if hasattr(obj, \"__name__\"):\n return obj.__name__\n return str(obj)\n\n\ndef merge_dicts(source, destination):\n \"\"\"\n Recursively merges two dictionaries.\n\n Args:\n source (`dict`): The dictionary to merge into `destination`.\n destination (`dict`): The dictionary to merge `source` into.\n \"\"\"\n for key, value in source.items():\n if isinstance(value, dict):\n node = destination.setdefault(key, {})\n merge_dicts(value, node)\n else:\n destination[key] = value\n\n return destination\n\n\ndef is_port_in_use(port: int = None) -> bool:\n \"\"\"\n Checks if a port is in use on `localhost`. Useful for checking if multiple `accelerate launch` commands have been\n run and need to see if the port is already in use.\n \"\"\"\n if port is None:\n port = 29500\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n return s.connect_ex((\"localhost\", port)) == 0\n\n\ndef convert_bytes(size):\n \"Converts `size` from bytes to the largest possible unit\"\n for x in [\"bytes\", \"KB\", \"MB\", \"GB\", \"TB\"]:\n if size < 1024.0:\n return f\"{round(size, 2)} {x}\"\n size /= 1024.0\n\n return f\"{round(size, 2)} PB\"\n", "path": "src/accelerate/utils/other.py"}]} | 2,975 | 222 |
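
With the restore logic from the golden diff above, the reproduction from the issue behaves as expected: a pre-existing variable gets its old value back on exit instead of being deleted. A small usage sketch of the patched context manager:

```python
# Assumes the patched patch_environment from the diff above.
import os

from accelerate.utils import patch_environment

os.environ["FOO"] = "BAR"
with patch_environment(FOO="BAZ"):
    assert os.environ["FOO"] == "BAZ"   # overridden inside the context
assert os.environ["FOO"] == "BAR"       # previous value restored on exit
```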
gh_patches_debug_27643 | rasdani/github-patches | git_diff | yt-dlp__yt-dlp-7218 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Substack] extractor fails ("Unable to extract preloads")
### DO NOT REMOVE OR SKIP THE ISSUE TEMPLATE
- [X] I understand that I will be **blocked** if I *intentionally* remove or skip any mandatory\* field
### Checklist
- [X] I'm reporting that yt-dlp is broken on a **supported** site
- [X] I've verified that I'm running yt-dlp version **2023.03.04** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
- [X] I've checked that all provided URLs are playable in a browser with the same IP and same login details
- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/yt-dlp/yt-dlp/wiki/FAQ#video-url-contains-an-ampersand--and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)
- [X] I've searched [known issues](https://github.com/yt-dlp/yt-dlp/issues/3766) and the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues **including closed ones**. DO NOT post duplicates
- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)
- [ ] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required
### Region
global
### Provide a description that is worded well enough to be understood
Ran yt-dlp on a rather ordinary Substack post, and received an error message along with a request to report the issue.
### Provide verbose output that clearly demonstrates the problem
- [X] Run **your** yt-dlp command with **-vU** flag added (`yt-dlp -vU <your command line>`)
- [ ] If using API, add `'verbose': True` to `YoutubeDL` params instead
- [X] Copy the WHOLE output (starting with `[debug] Command-line config`) and insert it below
### Complete Verbose Output
```shell
[debug] Command-line config: ['--restrict-filenames', '-o', '%(title)s-%(id)s-%(uploader)s.%(ext)s', '-w', '-v', 'https://pharmafiles.substack.com/p/how-big-pharma-calculates-a-patients']
[debug] Encodings: locale UTF-8, fs utf-8, pref UTF-8, out utf-8, error utf-8, screen utf-8
[debug] yt-dlp version [email protected] [392389b7d]
[debug] Python 3.11.3 (CPython x86_64 64bit) - Linux-5.15.88-with-glibc2.36 (OpenSSL 1.1.1t 7 Feb 2023, glibc 2.36)
[debug] exe versions: ffmpeg 4.4.3 (setts), ffprobe 4.4.3
[debug] Optional libraries: certifi-3021.03.16, pycrypto-3.17
[debug] Proxy map: {}
[debug] Loaded 1786 extractors
[Substack] Extracting URL: https://pharmafiles.substack.com/p/how-big-pharma-calculates-a-patients
[Substack] how-big-pharma-calculates-a-patients: Downloading webpage
ERROR: [Substack] how-big-pharma-calculates-a-patients: Unable to extract preloads; please report this issue on https://github.com/yt-dlp/yt-dlp/issues?q= , filling out the appropriate issue template. Confirm you are on the latest version using yt-dlp -U
File "/usr/lib/python3.11/site-packages/yt_dlp/extractor/common.py", line 694, in extract
ie_result = self._real_extract(url)
^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/lib/python3.11/site-packages/yt_dlp/extractor/substack.py", line 80, in _real_extract
webpage_info = self._search_json(r'<script[^>]*>\s*window\._preloads\s*=', webpage, 'preloads', display_id)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/lib/python3.11/site-packages/yt_dlp/extractor/common.py", line 1256, in _search_json
json_string = self._search_regex(
^^^^^^^^^^^^^^^^^^^
File "/usr/lib/python3.11/site-packages/yt_dlp/extractor/common.py", line 1242, in _search_regex
raise RegexNotFoundError('Unable to extract %s' % _name)
```
</issue>
<code>
[start of yt_dlp/extractor/substack.py]
1 import re
2 import urllib.parse
3
4 from .common import InfoExtractor
5 from ..utils import str_or_none, traverse_obj
6
7
8 class SubstackIE(InfoExtractor):
9 _VALID_URL = r'https?://(?P<username>[\w-]+)\.substack\.com/p/(?P<id>[\w-]+)'
10 _TESTS = [{
11 'url': 'https://haleynahman.substack.com/p/i-made-a-vlog?s=r',
12 'md5': 'f27e4fc6252001d48d479f45e65cdfd5',
13 'info_dict': {
14 'id': '47660949',
15 'ext': 'mp4',
16 'title': 'I MADE A VLOG',
17 'description': 'md5:10c01ff93439a62e70ce963b2aa0b7f6',
18 'thumbnail': 'md5:bec758a34d8ee9142d43bcebdf33af18',
19 'uploader': 'Maybe Baby',
20 'uploader_id': '33628',
21 }
22 }, {
23 'url': 'https://haleynahman.substack.com/p/-dear-danny-i-found-my-boyfriends?s=r',
24 'md5': '0a63eacec877a1171a62cfa69710fcea',
25 'info_dict': {
26 'id': '51045592',
27 'ext': 'mpga',
28 'title': "🎧 Dear Danny: I found my boyfriend's secret Twitter account",
29 'description': 'md5:a57f2439319e56e0af92dd0c95d75797',
30 'thumbnail': 'md5:daa40b6b79249417c14ff8103db29639',
31 'uploader': 'Maybe Baby',
32 'uploader_id': '33628',
33 }
34 }, {
35 'url': 'https://andrewzimmern.substack.com/p/mussels-with-black-bean-sauce-recipe',
36 'md5': 'fd3c07077b02444ff0130715b5f632bb',
37 'info_dict': {
38 'id': '47368578',
39 'ext': 'mp4',
40 'title': 'Mussels with Black Bean Sauce: Recipe of the Week #7',
41 'description': 'md5:b96234a2906c7d854d5229818d889515',
42 'thumbnail': 'md5:e30bfaa9da40e82aa62354263a9dd232',
43 'uploader': "Andrew Zimmern's Spilled Milk ",
44 'uploader_id': '577659',
45 }
46 }]
47
48 @classmethod
49 def _extract_embed_urls(cls, url, webpage):
50 if not re.search(r'<script[^>]+src=["\']https://substackcdn.com/[^"\']+\.js', webpage):
51 return
52
53 mobj = re.search(r'{[^}]*["\']subdomain["\']\s*:\s*["\'](?P<subdomain>[^"]+)', webpage)
54 if mobj:
55 parsed = urllib.parse.urlparse(url)
56 yield parsed._replace(netloc=f'{mobj.group("subdomain")}.substack.com').geturl()
57 raise cls.StopExtraction()
58
59 def _extract_video_formats(self, video_id, username):
60 formats, subtitles = [], {}
61 for video_format in ('hls', 'mp4'):
62 video_url = f'https://{username}.substack.com/api/v1/video/upload/{video_id}/src?type={video_format}'
63
64 if video_format == 'hls':
65 fmts, subs = self._extract_m3u8_formats_and_subtitles(video_url, video_id, 'mp4', fatal=False)
66 formats.extend(fmts)
67 self._merge_subtitles(subs, target=subtitles)
68 else:
69 formats.append({
70 'url': video_url,
71 'ext': video_format,
72 })
73
74 return formats, subtitles
75
76 def _real_extract(self, url):
77 display_id, username = self._match_valid_url(url).group('id', 'username')
78 webpage = self._download_webpage(url, display_id)
79
80 webpage_info = self._search_json(r'<script[^>]*>\s*window\._preloads\s*=', webpage, 'preloads', display_id)
81
82 post_type = webpage_info['post']['type']
83 formats, subtitles = [], {}
84 if post_type == 'podcast':
85 formats, subtitles = [{'url': webpage_info['post']['podcast_url']}], {}
86 elif post_type == 'video':
87 formats, subtitles = self._extract_video_formats(webpage_info['post']['videoUpload']['id'], username)
88 else:
89 self.raise_no_formats(f'Page type "{post_type}" is not supported')
90
91 return {
92 'id': str(webpage_info['post']['id']),
93 'formats': formats,
94 'subtitles': subtitles,
95 'title': traverse_obj(webpage_info, ('post', 'title')),
96 'description': traverse_obj(webpage_info, ('post', 'description')),
97 'thumbnail': traverse_obj(webpage_info, ('post', 'cover_image')),
98 'uploader': traverse_obj(webpage_info, ('pub', 'name')),
99 'uploader_id': str_or_none(traverse_obj(webpage_info, ('post', 'publication_id'))),
100 }
101
[end of yt_dlp/extractor/substack.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/yt_dlp/extractor/substack.py b/yt_dlp/extractor/substack.py
--- a/yt_dlp/extractor/substack.py
+++ b/yt_dlp/extractor/substack.py
@@ -2,7 +2,7 @@
import urllib.parse
from .common import InfoExtractor
-from ..utils import str_or_none, traverse_obj
+from ..utils import js_to_json, str_or_none, traverse_obj
class SubstackIE(InfoExtractor):
@@ -14,7 +14,7 @@
'id': '47660949',
'ext': 'mp4',
'title': 'I MADE A VLOG',
- 'description': 'md5:10c01ff93439a62e70ce963b2aa0b7f6',
+ 'description': 'md5:9248af9a759321e1027226f988f54d96',
'thumbnail': 'md5:bec758a34d8ee9142d43bcebdf33af18',
'uploader': 'Maybe Baby',
'uploader_id': '33628',
@@ -77,7 +77,9 @@
display_id, username = self._match_valid_url(url).group('id', 'username')
webpage = self._download_webpage(url, display_id)
- webpage_info = self._search_json(r'<script[^>]*>\s*window\._preloads\s*=', webpage, 'preloads', display_id)
+ webpage_info = self._parse_json(self._search_json(
+ r'window\._preloads\s*=\s*JSON\.parse\(', webpage, 'json string',
+ display_id, transform_source=js_to_json, contains_pattern=r'"{(?s:.+)}"'), display_id)
post_type = webpage_info['post']['type']
formats, subtitles = [], {}
| {"golden_diff": "diff --git a/yt_dlp/extractor/substack.py b/yt_dlp/extractor/substack.py\n--- a/yt_dlp/extractor/substack.py\n+++ b/yt_dlp/extractor/substack.py\n@@ -2,7 +2,7 @@\n import urllib.parse\n \n from .common import InfoExtractor\n-from ..utils import str_or_none, traverse_obj\n+from ..utils import js_to_json, str_or_none, traverse_obj\n \n \n class SubstackIE(InfoExtractor):\n@@ -14,7 +14,7 @@\n 'id': '47660949',\n 'ext': 'mp4',\n 'title': 'I MADE A VLOG',\n- 'description': 'md5:10c01ff93439a62e70ce963b2aa0b7f6',\n+ 'description': 'md5:9248af9a759321e1027226f988f54d96',\n 'thumbnail': 'md5:bec758a34d8ee9142d43bcebdf33af18',\n 'uploader': 'Maybe Baby',\n 'uploader_id': '33628',\n@@ -77,7 +77,9 @@\n display_id, username = self._match_valid_url(url).group('id', 'username')\n webpage = self._download_webpage(url, display_id)\n \n- webpage_info = self._search_json(r'<script[^>]*>\\s*window\\._preloads\\s*=', webpage, 'preloads', display_id)\n+ webpage_info = self._parse_json(self._search_json(\n+ r'window\\._preloads\\s*=\\s*JSON\\.parse\\(', webpage, 'json string',\n+ display_id, transform_source=js_to_json, contains_pattern=r'\"{(?s:.+)}\"'), display_id)\n \n post_type = webpage_info['post']['type']\n formats, subtitles = [], {}\n", "issue": "[Substack] extractor fails (\"Unable to extract preloads\")\n### DO NOT REMOVE OR SKIP THE ISSUE TEMPLATE\n\n- [X] I understand that I will be **blocked** if I *intentionally* remove or skip any mandatory\\* field\n\n### Checklist\n\n- [X] I'm reporting that yt-dlp is broken on a **supported** site\n- [X] I've verified that I'm running yt-dlp version **2023.03.04** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)\n- [X] I've checked that all provided URLs are playable in a browser with the same IP and same login details\n- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/yt-dlp/yt-dlp/wiki/FAQ#video-url-contains-an-ampersand--and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)\n- [X] I've searched [known issues](https://github.com/yt-dlp/yt-dlp/issues/3766) and the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues **including closed ones**. 
DO NOT post duplicates\n- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)\n- [ ] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required\n\n### Region\n\nglobal\n\n### Provide a description that is worded well enough to be understood\n\nRan yt-dlp on a rather ordinary Substack post, and received an error message along with a request to report the issue.\n\n### Provide verbose output that clearly demonstrates the problem\n\n- [X] Run **your** yt-dlp command with **-vU** flag added (`yt-dlp -vU <your command line>`)\n- [ ] If using API, add `'verbose': True` to `YoutubeDL` params instead\n- [X] Copy the WHOLE output (starting with `[debug] Command-line config`) and insert it below\n\n### Complete Verbose Output\n\n```shell\n[debug] Command-line config: ['--restrict-filenames', '-o', '%(title)s-%(id)s-%(uploader)s.%(ext)s', '-w', '-v', 'https://pharmafiles.substack.com/p/how-big-pharma-calculates-a-patients']\r\n[debug] Encodings: locale UTF-8, fs utf-8, pref UTF-8, out utf-8, error utf-8, screen utf-8\r\n[debug] yt-dlp version [email protected] [392389b7d]\r\n[debug] Python 3.11.3 (CPython x86_64 64bit) - Linux-5.15.88-with-glibc2.36 (OpenSSL 1.1.1t 7 Feb 2023, glibc 2.36)\r\n[debug] exe versions: ffmpeg 4.4.3 (setts), ffprobe 4.4.3\r\n[debug] Optional libraries: certifi-3021.03.16, pycrypto-3.17\r\n[debug] Proxy map: {}\r\n[debug] Loaded 1786 extractors\r\n[Substack] Extracting URL: https://pharmafiles.substack.com/p/how-big-pharma-calculates-a-patients\r\n[Substack] how-big-pharma-calculates-a-patients: Downloading webpage\r\nERROR: [Substack] how-big-pharma-calculates-a-patients: Unable to extract preloads; please report this issue on https://github.com/yt-dlp/yt-dlp/issues?q= , filling out the appropriate issue template. 
Confirm you are on the latest version using yt-dlp -U\r\n File \"/usr/lib/python3.11/site-packages/yt_dlp/extractor/common.py\", line 694, in extract\r\n ie_result = self._real_extract(url)\r\n ^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/lib/python3.11/site-packages/yt_dlp/extractor/substack.py\", line 80, in _real_extract\r\n webpage_info = self._search_json(r'<script[^>]*>\\s*window\\._preloads\\s*=', webpage, 'preloads', display_id)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/lib/python3.11/site-packages/yt_dlp/extractor/common.py\", line 1256, in _search_json\r\n json_string = self._search_regex(\r\n ^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/lib/python3.11/site-packages/yt_dlp/extractor/common.py\", line 1242, in _search_regex\r\n raise RegexNotFoundError('Unable to extract %s' % _name)\n```\n\n", "before_files": [{"content": "import re\nimport urllib.parse\n\nfrom .common import InfoExtractor\nfrom ..utils import str_or_none, traverse_obj\n\n\nclass SubstackIE(InfoExtractor):\n _VALID_URL = r'https?://(?P<username>[\\w-]+)\\.substack\\.com/p/(?P<id>[\\w-]+)'\n _TESTS = [{\n 'url': 'https://haleynahman.substack.com/p/i-made-a-vlog?s=r',\n 'md5': 'f27e4fc6252001d48d479f45e65cdfd5',\n 'info_dict': {\n 'id': '47660949',\n 'ext': 'mp4',\n 'title': 'I MADE A VLOG',\n 'description': 'md5:10c01ff93439a62e70ce963b2aa0b7f6',\n 'thumbnail': 'md5:bec758a34d8ee9142d43bcebdf33af18',\n 'uploader': 'Maybe Baby',\n 'uploader_id': '33628',\n }\n }, {\n 'url': 'https://haleynahman.substack.com/p/-dear-danny-i-found-my-boyfriends?s=r',\n 'md5': '0a63eacec877a1171a62cfa69710fcea',\n 'info_dict': {\n 'id': '51045592',\n 'ext': 'mpga',\n 'title': \"\ud83c\udfa7 Dear Danny: I found my boyfriend's secret Twitter account\",\n 'description': 'md5:a57f2439319e56e0af92dd0c95d75797',\n 'thumbnail': 'md5:daa40b6b79249417c14ff8103db29639',\n 'uploader': 'Maybe Baby',\n 'uploader_id': '33628',\n }\n }, {\n 'url': 'https://andrewzimmern.substack.com/p/mussels-with-black-bean-sauce-recipe',\n 'md5': 'fd3c07077b02444ff0130715b5f632bb',\n 'info_dict': {\n 'id': '47368578',\n 'ext': 'mp4',\n 'title': 'Mussels with Black Bean Sauce: Recipe of the Week #7',\n 'description': 'md5:b96234a2906c7d854d5229818d889515',\n 'thumbnail': 'md5:e30bfaa9da40e82aa62354263a9dd232',\n 'uploader': \"Andrew Zimmern's Spilled Milk \",\n 'uploader_id': '577659',\n }\n }]\n\n @classmethod\n def _extract_embed_urls(cls, url, webpage):\n if not re.search(r'<script[^>]+src=[\"\\']https://substackcdn.com/[^\"\\']+\\.js', webpage):\n return\n\n mobj = re.search(r'{[^}]*[\"\\']subdomain[\"\\']\\s*:\\s*[\"\\'](?P<subdomain>[^\"]+)', webpage)\n if mobj:\n parsed = urllib.parse.urlparse(url)\n yield parsed._replace(netloc=f'{mobj.group(\"subdomain\")}.substack.com').geturl()\n raise cls.StopExtraction()\n\n def _extract_video_formats(self, video_id, username):\n formats, subtitles = [], {}\n for video_format in ('hls', 'mp4'):\n video_url = f'https://{username}.substack.com/api/v1/video/upload/{video_id}/src?type={video_format}'\n\n if video_format == 'hls':\n fmts, subs = self._extract_m3u8_formats_and_subtitles(video_url, video_id, 'mp4', fatal=False)\n formats.extend(fmts)\n self._merge_subtitles(subs, target=subtitles)\n else:\n formats.append({\n 'url': video_url,\n 'ext': video_format,\n })\n\n return formats, subtitles\n\n def _real_extract(self, url):\n display_id, username = self._match_valid_url(url).group('id', 'username')\n webpage = self._download_webpage(url, display_id)\n\n 
webpage_info = self._search_json(r'<script[^>]*>\\s*window\\._preloads\\s*=', webpage, 'preloads', display_id)\n\n post_type = webpage_info['post']['type']\n formats, subtitles = [], {}\n if post_type == 'podcast':\n formats, subtitles = [{'url': webpage_info['post']['podcast_url']}], {}\n elif post_type == 'video':\n formats, subtitles = self._extract_video_formats(webpage_info['post']['videoUpload']['id'], username)\n else:\n self.raise_no_formats(f'Page type \"{post_type}\" is not supported')\n\n return {\n 'id': str(webpage_info['post']['id']),\n 'formats': formats,\n 'subtitles': subtitles,\n 'title': traverse_obj(webpage_info, ('post', 'title')),\n 'description': traverse_obj(webpage_info, ('post', 'description')),\n 'thumbnail': traverse_obj(webpage_info, ('post', 'cover_image')),\n 'uploader': traverse_obj(webpage_info, ('pub', 'name')),\n 'uploader_id': str_or_none(traverse_obj(webpage_info, ('post', 'publication_id'))),\n }\n", "path": "yt_dlp/extractor/substack.py"}]} | 3,191 | 454 |
gh_patches_debug_21797 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-664 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
403 attempting to login

It looks like I am logged into my account if I open another tab, though.
</issue>
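In a stock Django setup, a 403 on a login POST usually comes from CSRF verification rather than from permissions, for example when the submitted token no longer matches the session, and the note that another tab is already signed in fits that picture. A minimal sketch of the two usual mitigations, using a hypothetical view rather than BookWyrm's actual one:

```python
from django.contrib.auth import authenticate, login
from django.shortcuts import redirect
from django.utils.decorators import method_decorator
from django.views import View
from django.views.decorators.csrf import csrf_exempt


@method_decorator(csrf_exempt, name='dispatch')  # skip CSRF verification for this view
class LoginSketch(View):
    """Hypothetical login view illustrating the two mitigations."""

    def post(self, request):
        # A user who already has a session (e.g. signed in from another tab)
        # should be sent home instead of re-submitting the form.
        if request.user.is_authenticated:
            return redirect('/')
        user = authenticate(
            request,
            username=request.POST.get('username'),
            password=request.POST.get('password'),
        )
        if user is not None:
            login(request, user)
            return redirect('/')
        return redirect('/login?error=1')
```

Exempting a login view from CSRF checks gives up some protection, so it is a deliberate trade-off rather than a default.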
<code>
[start of bookwyrm/views/authentication.py]
1 ''' class views for login/register views '''
2 from django.contrib.auth import authenticate, login, logout
3 from django.contrib.auth.decorators import login_required
4 from django.core.exceptions import PermissionDenied
5 from django.shortcuts import get_object_or_404, redirect
6 from django.template.response import TemplateResponse
7 from django.utils import timezone
8 from django.utils.decorators import method_decorator
9 from django.views import View
10
11 from bookwyrm import forms, models
12 from bookwyrm.settings import DOMAIN
13
14
15 # pylint: disable= no-self-use
16 class Login(View):
17 ''' authenticate an existing user '''
18 def get(self, request):
19 ''' login page '''
20 if request.user.is_authenticated:
21 return redirect('/')
22 # sene user to the login page
23 data = {
24 'title': 'Login',
25 'login_form': forms.LoginForm(),
26 'register_form': forms.RegisterForm(),
27 }
28 return TemplateResponse(request, 'login.html', data)
29
30 def post(self, request):
31 ''' authentication action '''
32 login_form = forms.LoginForm(request.POST)
33
34 localname = login_form.data['localname']
35 if '@' in localname: # looks like an email address to me
36 email = localname
37 try:
38 username = models.User.objects.get(email=email)
39 except models.User.DoesNotExist: # maybe it's a full username?
40 username = localname
41 else:
42 username = '%s@%s' % (localname, DOMAIN)
43 password = login_form.data['password']
44 user = authenticate(request, username=username, password=password)
45 if user is not None:
46 # successful login
47 login(request, user)
48 user.last_active_date = timezone.now()
49 user.save(broadcast=False)
50 return redirect(request.GET.get('next', '/'))
51
52 # login errors
53 login_form.non_field_errors = 'Username or password are incorrect'
54 register_form = forms.RegisterForm()
55 data = {
56 'login_form': login_form,
57 'register_form': register_form
58 }
59 return TemplateResponse(request, 'login.html', data)
60
61
62 class Register(View):
63 ''' register a user '''
64 def post(self, request):
65 ''' join the server '''
66 if not models.SiteSettings.get().allow_registration:
67 invite_code = request.POST.get('invite_code')
68
69 if not invite_code:
70 raise PermissionDenied
71
72 invite = get_object_or_404(models.SiteInvite, code=invite_code)
73 if not invite.valid():
74 raise PermissionDenied
75 else:
76 invite = None
77
78 form = forms.RegisterForm(request.POST)
79 errors = False
80 if not form.is_valid():
81 errors = True
82
83 localname = form.data['localname'].strip()
84 email = form.data['email']
85 password = form.data['password']
86
87 # check localname and email uniqueness
88 if models.User.objects.filter(localname=localname).first():
89 form.errors['localname'] = [
90 'User with this username already exists']
91 errors = True
92
93 if errors:
94 data = {
95 'login_form': forms.LoginForm(),
96 'register_form': form,
97 'invite': invite,
98 'valid': invite.valid() if invite else True,
99 }
100 if invite:
101 return TemplateResponse(request, 'invite.html', data)
102 return TemplateResponse(request, 'login.html', data)
103
104 username = '%s@%s' % (localname, DOMAIN)
105 user = models.User.objects.create_user(
106 username, email, password, localname=localname, local=True)
107 if invite:
108 invite.times_used += 1
109 invite.save()
110
111 login(request, user)
112 return redirect('/')
113
114
115 @method_decorator(login_required, name='dispatch')
116 class Logout(View):
117 ''' log out '''
118 def get(self, request):
119 ''' done with this place! outa here! '''
120 logout(request)
121 return redirect('/')
122
[end of bookwyrm/views/authentication.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bookwyrm/views/authentication.py b/bookwyrm/views/authentication.py
--- a/bookwyrm/views/authentication.py
+++ b/bookwyrm/views/authentication.py
@@ -6,6 +6,7 @@
from django.template.response import TemplateResponse
from django.utils import timezone
from django.utils.decorators import method_decorator
+from django.views.decorators.csrf import csrf_exempt
from django.views import View
from bookwyrm import forms, models
@@ -13,6 +14,7 @@
# pylint: disable= no-self-use
+@method_decorator(csrf_exempt, name='dispatch')
class Login(View):
''' authenticate an existing user '''
def get(self, request):
@@ -29,6 +31,8 @@
def post(self, request):
''' authentication action '''
+ if request.user.is_authenticated:
+ return redirect('/')
login_form = forms.LoginForm(request.POST)
localname = login_form.data['localname']
| {"golden_diff": "diff --git a/bookwyrm/views/authentication.py b/bookwyrm/views/authentication.py\n--- a/bookwyrm/views/authentication.py\n+++ b/bookwyrm/views/authentication.py\n@@ -6,6 +6,7 @@\n from django.template.response import TemplateResponse\n from django.utils import timezone\n from django.utils.decorators import method_decorator\n+from django.views.decorators.csrf import csrf_exempt\n from django.views import View\n \n from bookwyrm import forms, models\n@@ -13,6 +14,7 @@\n \n \n # pylint: disable= no-self-use\n+@method_decorator(csrf_exempt, name='dispatch')\n class Login(View):\n ''' authenticate an existing user '''\n def get(self, request):\n@@ -29,6 +31,8 @@\n \n def post(self, request):\n ''' authentication action '''\n+ if request.user.is_authenticated:\n+ return redirect('/')\n login_form = forms.LoginForm(request.POST)\n \n localname = login_form.data['localname']\n", "issue": "403 attempting to login\n\r\n\r\nIt looks like if I open another tab I am logged into my account though. \n", "before_files": [{"content": "''' class views for login/register views '''\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.utils import timezone\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.settings import DOMAIN\n\n\n# pylint: disable= no-self-use\nclass Login(View):\n ''' authenticate an existing user '''\n def get(self, request):\n ''' login page '''\n if request.user.is_authenticated:\n return redirect('/')\n # sene user to the login page\n data = {\n 'title': 'Login',\n 'login_form': forms.LoginForm(),\n 'register_form': forms.RegisterForm(),\n }\n return TemplateResponse(request, 'login.html', data)\n\n def post(self, request):\n ''' authentication action '''\n login_form = forms.LoginForm(request.POST)\n\n localname = login_form.data['localname']\n if '@' in localname: # looks like an email address to me\n email = localname\n try:\n username = models.User.objects.get(email=email)\n except models.User.DoesNotExist: # maybe it's a full username?\n username = localname\n else:\n username = '%s@%s' % (localname, DOMAIN)\n password = login_form.data['password']\n user = authenticate(request, username=username, password=password)\n if user is not None:\n # successful login\n login(request, user)\n user.last_active_date = timezone.now()\n user.save(broadcast=False)\n return redirect(request.GET.get('next', '/'))\n\n # login errors\n login_form.non_field_errors = 'Username or password are incorrect'\n register_form = forms.RegisterForm()\n data = {\n 'login_form': login_form,\n 'register_form': register_form\n }\n return TemplateResponse(request, 'login.html', data)\n\n\nclass Register(View):\n ''' register a user '''\n def post(self, request):\n ''' join the server '''\n if not models.SiteSettings.get().allow_registration:\n invite_code = request.POST.get('invite_code')\n\n if not invite_code:\n raise PermissionDenied\n\n invite = get_object_or_404(models.SiteInvite, code=invite_code)\n if not invite.valid():\n raise PermissionDenied\n else:\n invite = None\n\n form = forms.RegisterForm(request.POST)\n errors = False\n if not form.is_valid():\n errors = True\n\n localname = form.data['localname'].strip()\n email = form.data['email']\n password = 
form.data['password']\n\n # check localname and email uniqueness\n if models.User.objects.filter(localname=localname).first():\n form.errors['localname'] = [\n 'User with this username already exists']\n errors = True\n\n if errors:\n data = {\n 'login_form': forms.LoginForm(),\n 'register_form': form,\n 'invite': invite,\n 'valid': invite.valid() if invite else True,\n }\n if invite:\n return TemplateResponse(request, 'invite.html', data)\n return TemplateResponse(request, 'login.html', data)\n\n username = '%s@%s' % (localname, DOMAIN)\n user = models.User.objects.create_user(\n username, email, password, localname=localname, local=True)\n if invite:\n invite.times_used += 1\n invite.save()\n\n login(request, user)\n return redirect('/')\n\n\n@method_decorator(login_required, name='dispatch')\nclass Logout(View):\n ''' log out '''\n def get(self, request):\n ''' done with this place! outa here! '''\n logout(request)\n return redirect('/')\n", "path": "bookwyrm/views/authentication.py"}]} | 1,731 | 205 |
gh_patches_debug_18116 | rasdani/github-patches | git_diff | beeware__toga-2139 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Android Read-only Multi-line text input displays suggestions (Spell Checking)
### Describe the bug
When using the "MultilineTextInput" with the "readonly" parameter set to "True" on Android, it will suggest spelling improvements based on dictionary candidates. This behavior results in underlined text on android devices when using a language other than the keyboard language or with unknown words in general.
### Steps to reproduce
1. Use MultilineTextInput in readonly with a set text
2. Compile for Android and set the keyboard language to one that differs from the language of the text.
3. The suggestions should appear, underlining the text.
### Expected behavior
Suggestions do not show up in MultilineTextInput if set to readonly.
### Screenshots
_No response_
### Environment
- Operating System: Fedora 38 Workstation / Lineage OS 20.0
- Python version: 3.11.5
- Software versions:
- Briefcase: 0.3.15
- Toga: 0.3.1
[tool.briefcase.app.trinker.android]
requires = [
#"toga-android~=0.3.1"
'git+https://github.com/beeware/toga.git#subdirectory=core',
'git+https://github.com/beeware/toga.git#subdirectory=android',
]
### Logs
```
```
### Additional context
I did some research on the topic and while I'm not really an Android dev, I think I found the problem. In "android/src/toga_android/widgets/textinput.py" we need to set the flag [TYPE_TEXT_FLAG_NO_SUGGESTIONS](https://developer.android.com/reference/android/text/InputType#TYPE_TEXT_FLAG_NO_SUGGESTIONS) if "readonly" is true.
As said before, I'm not really an Android dev and am not sure how this would be implemented.
</issue>
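`TYPE_TEXT_FLAG_NO_SUGGESTIONS` is a bit flag inside the widget's input type, so it can be ORed in and masked out against the value returned by `getInputType()`. A rough sketch of a helper that applies this to the backend's native `EditText` (the `apply_readonly` function is hypothetical; the `InputType` import mirrors the listing below):

```python
from android.text import InputType  # Chaquopy binding, as used in textinput.py below


def apply_readonly(native, readonly: bool) -> None:
    """Sketch: toggle focusability and the no-suggestions bit on an EditText."""
    if readonly:
        native.setFocusable(False)  # implicitly also disables touch-mode focus
        # OR the flag into the current input type so the keyboard stops suggesting.
        native.setInputType(native.getInputType() | InputType.TYPE_TEXT_FLAG_NO_SUGGESTIONS)
    else:
        native.setFocusableInTouchMode(True)
        # Clear the flag again by AND-ing with its complement.
        native.setInputType(native.getInputType() & ~InputType.TYPE_TEXT_FLAG_NO_SUGGESTIONS)
```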
<code>
[start of android/src/toga_android/widgets/textinput.py]
1 from java import dynamic_proxy
2 from travertino.size import at_least
3
4 from android.text import InputType, TextWatcher
5 from android.view import Gravity, View
6 from android.widget import EditText
7 from toga_android.keys import toga_key
8
9 from .label import TextViewWidget
10
11
12 class TogaTextWatcher(dynamic_proxy(TextWatcher)):
13 def __init__(self, impl):
14 super().__init__()
15 self.impl = impl
16
17 def beforeTextChanged(self, _charSequence, _start, _count, _after):
18 pass
19
20 def afterTextChanged(self, _editable):
21 self.impl._on_change()
22
23 def onTextChanged(self, _charSequence, _start, _before, _count):
24 pass
25
26
27 class TogaKeyListener(dynamic_proxy(View.OnKeyListener)):
28 def __init__(self, impl):
29 super().__init__()
30 self.impl = impl
31
32 def onKey(self, _view, _key, _event):
33 event_info = toga_key(_event)
34 if event_info is None:
35 pass # pragma: nocover
36 else:
37 key_pressed = event_info["key"].value
38 if (key_pressed == "<enter>" or key_pressed == "numpad:enter") and (
39 int(_event.getAction()) == 1
40 ):
41 self.impl._on_confirm()
42 return False
43
44
45 class TogaFocusListener(dynamic_proxy(View.OnFocusChangeListener)):
46 def __init__(self, impl):
47 super().__init__()
48 self.impl = impl
49
50 def onFocusChange(self, view, has_focus):
51 if has_focus:
52 self.impl._on_gain_focus()
53 else:
54 self.impl._on_lose_focus()
55
56
57 class TextInput(TextViewWidget):
58 def create(self, input_type=InputType.TYPE_CLASS_TEXT):
59 self.native = EditText(self._native_activity)
60 self.native.setInputType(input_type)
61 self.cache_textview_defaults()
62
63 self.native.addTextChangedListener(TogaTextWatcher(self))
64 self.native.setOnKeyListener(TogaKeyListener(self))
65 self.native.setOnFocusChangeListener(TogaFocusListener(self))
66
67 def get_value(self):
68 return str(self.native.getText())
69
70 def set_value(self, value):
71 self.native.setText(value)
72
73 def get_readonly(self):
74 return not self.native.isFocusable()
75
76 def set_readonly(self, readonly):
77 if readonly:
78 # Implicitly calls setFocusableInTouchMode(False)
79 self.native.setFocusable(False)
80 else:
81 # Implicitly calls setFocusable(True)
82 self.native.setFocusableInTouchMode(True)
83
84 def get_placeholder(self):
85 return str(self.native.getHint())
86
87 def set_placeholder(self, value):
88 self.native.setHint(value)
89
90 def set_alignment(self, value):
91 self.set_textview_alignment(value, Gravity.CENTER_VERTICAL)
92
93 def set_error(self, error_message):
94 self.native.setError(error_message)
95
96 def clear_error(self):
97 self.native.setError(None)
98
99 def is_valid(self):
100 return self.native.getError() is None
101
102 def _on_change(self):
103 self.interface.on_change(None)
104 self.interface._validate()
105
106 def _on_confirm(self):
107 self.interface.on_confirm(None)
108
109 def _on_gain_focus(self):
110 self.interface.on_gain_focus(None)
111
112 def _on_lose_focus(self):
113 self.interface.on_lose_focus(None)
114
115 def rehint(self):
116 self.interface.intrinsic.width = at_least(self.interface._MIN_WIDTH)
117 self.native.measure(View.MeasureSpec.UNSPECIFIED, View.MeasureSpec.UNSPECIFIED)
118 self.interface.intrinsic.height = self.native.getMeasuredHeight()
119
[end of android/src/toga_android/widgets/textinput.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/android/src/toga_android/widgets/textinput.py b/android/src/toga_android/widgets/textinput.py
--- a/android/src/toga_android/widgets/textinput.py
+++ b/android/src/toga_android/widgets/textinput.py
@@ -77,9 +77,19 @@
if readonly:
# Implicitly calls setFocusableInTouchMode(False)
self.native.setFocusable(False)
+ # Add TYPE_TEXT_FLAG_NO_SUGGESTIONS to the input type to disable suggestions
+ input_type = (
+ self.native.getInputType() | InputType.TYPE_TEXT_FLAG_NO_SUGGESTIONS
+ )
+ self.native.setInputType(input_type)
else:
# Implicitly calls setFocusable(True)
self.native.setFocusableInTouchMode(True)
+ # Remove TYPE_TEXT_FLAG_NO_SUGGESTIONS to enable suggestions
+ input_type = (
+ self.native.getInputType() & ~InputType.TYPE_TEXT_FLAG_NO_SUGGESTIONS
+ )
+ self.native.setInputType(input_type)
def get_placeholder(self):
return str(self.native.getHint())
| {"golden_diff": "diff --git a/android/src/toga_android/widgets/textinput.py b/android/src/toga_android/widgets/textinput.py\n--- a/android/src/toga_android/widgets/textinput.py\n+++ b/android/src/toga_android/widgets/textinput.py\n@@ -77,9 +77,19 @@\n if readonly:\n # Implicitly calls setFocusableInTouchMode(False)\n self.native.setFocusable(False)\n+ # Add TYPE_TEXT_FLAG_NO_SUGGESTIONS to the input type to disable suggestions\n+ input_type = (\n+ self.native.getInputType() | InputType.TYPE_TEXT_FLAG_NO_SUGGESTIONS\n+ )\n+ self.native.setInputType(input_type)\n else:\n # Implicitly calls setFocusable(True)\n self.native.setFocusableInTouchMode(True)\n+ # Remove TYPE_TEXT_FLAG_NO_SUGGESTIONS to enable suggestions\n+ input_type = (\n+ self.native.getInputType() & ~InputType.TYPE_TEXT_FLAG_NO_SUGGESTIONS\n+ )\n+ self.native.setInputType(input_type)\n \n def get_placeholder(self):\n return str(self.native.getHint())\n", "issue": "Android Read-only Multi-line text input displays suggestions (Spell Checking)\n### Describe the bug\n\nWhen using the \"MultilineTextInput\" with the \"readonly\" parameter set to \"True\" on Android, it will suggest spelling improvements based on dictionary candidates. This behavior results in underlined text on android devices when using a language other than the keyboard language or with unknown words in general.\n\n### Steps to reproduce\n\n1. Use MultilineTextInput in readonly with a set text\r\n2. Compile for android and set the KeyBoard language to a different one than the text.\r\n3. The suggestions should appear, underlining the Text.\n\n### Expected behavior\n\nSuggestions do not show up in MultilineTextInput if set to readonly.\n\n### Screenshots\n\n_No response_\n\n### Environment\n\n- Operating System: Fedora 38 Workstation / Lineage OS 20.0\r\n- Python version: 3.11.5\r\n- Software versions:\r\n - Briefcase: 0.3.15\r\n - Toga: 0.3.1 \r\n\r\n[tool.briefcase.app.trinker.android]\r\nrequires = [\r\n #\"toga-android~=0.3.1\"\r\n 'git+https://github.com/beeware/toga.git#subdirectory=core',\r\n 'git+https://github.com/beeware/toga.git#subdirectory=android',\r\n]\r\n\r\n\n\n### Logs\n\n```\r\n\r\n```\r\n\n\n### Additional context\n\nI did some research on the topic and while I'm not really an Android dev, I think I found the problem. 
In \"android/src/toga_android/widgets/textinput.py\" we need to set the flag [TYPE_TEXT_FLAG_NO_SUGGESTIONS](https://developer.android.com/reference/android/text/InputType#TYPE_TEXT_FLAG_NO_SUGGESTIONS) if \"readonly\" is true.\r\n\r\nAs said before, I'm not really an Android dev and am not sure how this would be implemented.\r\n\n", "before_files": [{"content": "from java import dynamic_proxy\nfrom travertino.size import at_least\n\nfrom android.text import InputType, TextWatcher\nfrom android.view import Gravity, View\nfrom android.widget import EditText\nfrom toga_android.keys import toga_key\n\nfrom .label import TextViewWidget\n\n\nclass TogaTextWatcher(dynamic_proxy(TextWatcher)):\n def __init__(self, impl):\n super().__init__()\n self.impl = impl\n\n def beforeTextChanged(self, _charSequence, _start, _count, _after):\n pass\n\n def afterTextChanged(self, _editable):\n self.impl._on_change()\n\n def onTextChanged(self, _charSequence, _start, _before, _count):\n pass\n\n\nclass TogaKeyListener(dynamic_proxy(View.OnKeyListener)):\n def __init__(self, impl):\n super().__init__()\n self.impl = impl\n\n def onKey(self, _view, _key, _event):\n event_info = toga_key(_event)\n if event_info is None:\n pass # pragma: nocover\n else:\n key_pressed = event_info[\"key\"].value\n if (key_pressed == \"<enter>\" or key_pressed == \"numpad:enter\") and (\n int(_event.getAction()) == 1\n ):\n self.impl._on_confirm()\n return False\n\n\nclass TogaFocusListener(dynamic_proxy(View.OnFocusChangeListener)):\n def __init__(self, impl):\n super().__init__()\n self.impl = impl\n\n def onFocusChange(self, view, has_focus):\n if has_focus:\n self.impl._on_gain_focus()\n else:\n self.impl._on_lose_focus()\n\n\nclass TextInput(TextViewWidget):\n def create(self, input_type=InputType.TYPE_CLASS_TEXT):\n self.native = EditText(self._native_activity)\n self.native.setInputType(input_type)\n self.cache_textview_defaults()\n\n self.native.addTextChangedListener(TogaTextWatcher(self))\n self.native.setOnKeyListener(TogaKeyListener(self))\n self.native.setOnFocusChangeListener(TogaFocusListener(self))\n\n def get_value(self):\n return str(self.native.getText())\n\n def set_value(self, value):\n self.native.setText(value)\n\n def get_readonly(self):\n return not self.native.isFocusable()\n\n def set_readonly(self, readonly):\n if readonly:\n # Implicitly calls setFocusableInTouchMode(False)\n self.native.setFocusable(False)\n else:\n # Implicitly calls setFocusable(True)\n self.native.setFocusableInTouchMode(True)\n\n def get_placeholder(self):\n return str(self.native.getHint())\n\n def set_placeholder(self, value):\n self.native.setHint(value)\n\n def set_alignment(self, value):\n self.set_textview_alignment(value, Gravity.CENTER_VERTICAL)\n\n def set_error(self, error_message):\n self.native.setError(error_message)\n\n def clear_error(self):\n self.native.setError(None)\n\n def is_valid(self):\n return self.native.getError() is None\n\n def _on_change(self):\n self.interface.on_change(None)\n self.interface._validate()\n\n def _on_confirm(self):\n self.interface.on_confirm(None)\n\n def _on_gain_focus(self):\n self.interface.on_gain_focus(None)\n\n def _on_lose_focus(self):\n self.interface.on_lose_focus(None)\n\n def rehint(self):\n self.interface.intrinsic.width = at_least(self.interface._MIN_WIDTH)\n self.native.measure(View.MeasureSpec.UNSPECIFIED, View.MeasureSpec.UNSPECIFIED)\n self.interface.intrinsic.height = self.native.getMeasuredHeight()\n", "path": "android/src/toga_android/widgets/textinput.py"}]} | 
1,950 | 234 |
gh_patches_debug_14420 | rasdani/github-patches | git_diff | pyinstaller__pyinstaller-6774 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
XDG_DATA_DIRS environment variable being overriden when running packaged app
I have a simple app that runs a local Flask web server and opens a web browser to show it on start up using the following command:
webbrowser.open('http://localhost:%d' % port, new=2, autoraise=True)
When I run it in development mode, this works fine. However, when it's packaged using pyinstaller and run as an executable, I get this:
gvfs-open: http://localhost:5000: error opening location: No application is registered as handling this file
The order of browsers that the `webbrowser` module will try is the same in both instances:
'xdg-open', 'gvfs-open', 'gnome-open', 'x-www-browser', 'firefox'
I tried using specific commands using `webbrowser.get(NAME).open` but none worked (except Firefox).
It seems like the XDG_DATA_DIRS environment variable is being overridden:
`print('XDG_DATA_DIRS: ' + os.environ.get('XDG_DATA_DIRS', 'not found'))`
yields
`/usr/share/ubuntu:/usr/share/gnome:/usr/local/share:/usr/share:/var/lib/snapd/desktop:/var/lib/snapd/desktop`
in development mode and
`SOURCE_DIR/dist/linux/app/share`
in the packaged executable.
I'm using Python 3.5.2 and pyinstaller 3.3.1 on Ubuntu 16.04 with the command
pyinstaller --add-data="static:static" app.py
Stack overflow link: https://stackoverflow.com/questions/51657864/pyinstaller-webbrowser-open-doesnt-work-in-packaged-app
</issue>
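The runtime hook shown below replaces `XDG_DATA_DIRS` wholesale, which hides the system's `.desktop` entries from launchers such as `xdg-open` and `gvfs-open`. A minimal sketch of the alternative this report points toward, prepending the bundle's `share` directory while keeping the system paths (the helper name and the non-frozen fallback are illustrative):

```python
import os
import sys


def extend_xdg_data_dirs(bundle_dir: str) -> None:
    """Sketch: prepend the frozen app's data dir instead of replacing the variable."""
    pyi_share = os.path.join(bundle_dir, 'share')
    existing = os.environ.get('XDG_DATA_DIRS', '')
    if existing:
        if pyi_share not in existing.split(os.pathsep):
            os.environ['XDG_DATA_DIRS'] = pyi_share + os.pathsep + existing
    else:
        os.environ['XDG_DATA_DIRS'] = pyi_share


if __name__ == '__main__':
    # In a frozen app sys._MEIPASS points at the unpacked bundle; fall back to
    # the current directory here so the sketch also runs outside PyInstaller.
    extend_xdg_data_dirs(getattr(sys, '_MEIPASS', os.getcwd()))
    print(os.environ['XDG_DATA_DIRS'])
```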
<code>
[start of PyInstaller/hooks/rthooks/pyi_rth_glib.py]
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2015-2022, PyInstaller Development Team.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #
9 # SPDX-License-Identifier: Apache-2.0
10 #-----------------------------------------------------------------------------
11
12 import os
13 import sys
14
15 os.environ['XDG_DATA_DIRS'] = os.path.join(sys._MEIPASS, 'share')
16
[end of PyInstaller/hooks/rthooks/pyi_rth_glib.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/PyInstaller/hooks/rthooks/pyi_rth_glib.py b/PyInstaller/hooks/rthooks/pyi_rth_glib.py
--- a/PyInstaller/hooks/rthooks/pyi_rth_glib.py
+++ b/PyInstaller/hooks/rthooks/pyi_rth_glib.py
@@ -12,4 +12,20 @@
import os
import sys
-os.environ['XDG_DATA_DIRS'] = os.path.join(sys._MEIPASS, 'share')
+# Prepend the frozen application's data dir to XDG_DATA_DIRS. We need to avoid overwriting the existing paths in order
+# to allow the frozen application to run system-installed applications (for example, launch a web browser via the
+# webbrowser module on Linux). Should the user desire complete isolation of the frozen application from the system,
+# they need to clean up XDG_DATA_DIRS at the start of their program (i.e., remove all entries but first).
+pyi_data_dir = os.path.join(sys._MEIPASS, 'share')
+
+xdg_data_dirs = os.environ.get('XDG_DATA_DIRS', None)
+if xdg_data_dirs:
+ if pyi_data_dir not in xdg_data_dirs:
+ xdg_data_dirs = pyi_data_dir + os.pathsep + xdg_data_dirs
+else:
+ xdg_data_dirs = pyi_data_dir
+os.environ['XDG_DATA_DIRS'] = xdg_data_dirs
+
+# Cleanup aux variables
+del xdg_data_dirs
+del pyi_data_dir
| {"golden_diff": "diff --git a/PyInstaller/hooks/rthooks/pyi_rth_glib.py b/PyInstaller/hooks/rthooks/pyi_rth_glib.py\n--- a/PyInstaller/hooks/rthooks/pyi_rth_glib.py\n+++ b/PyInstaller/hooks/rthooks/pyi_rth_glib.py\n@@ -12,4 +12,20 @@\n import os\n import sys\n \n-os.environ['XDG_DATA_DIRS'] = os.path.join(sys._MEIPASS, 'share')\n+# Prepend the frozen application's data dir to XDG_DATA_DIRS. We need to avoid overwriting the existing paths in order\n+# to allow the frozen application to run system-installed applications (for example, launch a web browser via the\n+# webbrowser module on Linux). Should the user desire complete isolation of the frozen application from the system,\n+# they need to clean up XDG_DATA_DIRS at the start of their program (i.e., remove all entries but first).\n+pyi_data_dir = os.path.join(sys._MEIPASS, 'share')\n+\n+xdg_data_dirs = os.environ.get('XDG_DATA_DIRS', None)\n+if xdg_data_dirs:\n+ if pyi_data_dir not in xdg_data_dirs:\n+ xdg_data_dirs = pyi_data_dir + os.pathsep + xdg_data_dirs\n+else:\n+ xdg_data_dirs = pyi_data_dir\n+os.environ['XDG_DATA_DIRS'] = xdg_data_dirs\n+\n+# Cleanup aux variables\n+del xdg_data_dirs\n+del pyi_data_dir\n", "issue": "XDG_DATA_DIRS environment variable being overriden when running packaged app\nI have a simple app that runs a local Flask web server and opens a web browser to show it on start up using the following command: \r\n\r\n webbrowser.open('http://localhost:%d' % port, new=2, autoraise=True)\r\n\r\nWhen I run it in development mode, this works fine. However, when it's packaged using pyinstaller and run as an executable, I get this:\r\n\r\n gvfs-open: http://localhost:5000: error opening location: No application is registered as handling this file\r\n\r\nThe order of the browser's web browser will try is the same in both instances: \r\n\r\n 'xdg-open', 'gvfs-open', 'gnome-open', 'x-www-browser', 'firefox'\r\n\r\nI tried using specific commands using `webbrowser.get(NAME).open` but none worked (except Firefox). \r\n\r\nIt seems like the XDG_DATA_DIRS environment variable is being over ridden: \r\n\r\n`print('XDG_DATA_DIRS: ' + os.environ.get('XDG_DATA_DIRS', 'not found'))` \r\n\r\nyields \r\n\r\n`/usr/share/ubuntu:/usr/share/gnome:/usr/local/share:/usr/share:/var/lib/snapd/desktop:/var/lib/snapd/desktop` \r\n\r\nin development mode and \r\n\r\n`SOURCE_DIR/dist/linux/app/share`\r\n\r\nin the packaged executable. \r\n\r\nI'm using Python 3.5.2 and pyinstaller 3.3.1 on Ubuntu 16.04 with the command \r\n\r\n pyinstaller --add-data=\"static:static\" app.py\r\n\r\nStack overflow link: https://stackoverflow.com/questions/51657864/pyinstaller-webbrowser-open-doesnt-work-in-packaged-app\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2015-2022, PyInstaller Development Team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: Apache-2.0\n#-----------------------------------------------------------------------------\n\nimport os\nimport sys\n\nos.environ['XDG_DATA_DIRS'] = os.path.join(sys._MEIPASS, 'share')\n", "path": "PyInstaller/hooks/rthooks/pyi_rth_glib.py"}]} | 1,053 | 333 |
gh_patches_debug_28621 | rasdani/github-patches | git_diff | conan-io__conan-center-index-20413 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
cocoyaxi: make deprecated
Specify library name and version: **cocoyaxi/***
---
- [x] I've read the [contributing guidelines](https://github.com/conan-io/conan-center-index/blob/master/CONTRIBUTING.md).
- [ ] I've used a [recent](https://github.com/conan-io/conan/releases/latest) Conan client version close to the [currently deployed](https://github.com/conan-io/conan-center-index/blob/master/.c3i/config_v1.yml#L6).
- [ ] I've tried at least one configuration locally with the [conan-center hook](https://github.com/conan-io/hooks.git) activated.
</issue>
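In Conan, a recipe is marked as superseded through the `deprecated` attribute on the `ConanFile`; when it is set to a string, the client's warning also names the replacement package. A minimal sketch of what that looks like (the `coost` successor name follows the change applied below; the rest of the recipe is omitted):

```python
from conan import ConanFile


class CocoyaxiConan(ConanFile):
    name = "cocoyaxi"
    # Resolving this reference now prints a deprecation warning that points users
    # at the successor package; a bare True would warn without naming one.
    deprecated = "coost"
```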
<code>
[start of recipes/cocoyaxi/all/conanfile.py]
1 from conan import ConanFile
2 from conan.errors import ConanInvalidConfiguration
3 from conan.tools.build import check_min_cppstd
4 from conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout
5 from conan.tools.files import copy, get
6 from conan.tools.microsoft import is_msvc, is_msvc_static_runtime
7 import os
8
9 required_conan_version = ">=1.50.0"
10
11
12 class CocoyaxiConan(ConanFile):
13 name = "cocoyaxi"
14 url = "https://github.com/conan-io/conan-center-index"
15 homepage = "https://github.com/idealvin/cocoyaxi"
16 license = "MIT"
17 description = "A go-style coroutine library in C++11 and more."
18 topics = ("cocoyaxi", "coroutine", "c++11")
19
20 settings = "os", "arch", "compiler", "build_type"
21 options = {
22 "shared": [True, False],
23 "fPIC": [True, False],
24 "with_libcurl": [True, False],
25 "with_openssl": [True, False],
26 }
27 default_options = {
28 "shared": False,
29 "fPIC": True,
30 "with_libcurl": False,
31 "with_openssl": False,
32 }
33
34 def config_options(self):
35 if self.settings.os == "Windows":
36 del self.options.fPIC
37
38 def configure(self):
39 if self.options.shared:
40 del self.options.fPIC
41
42 def requirements(self):
43 if self.options.with_libcurl:
44 self.requires("libcurl/7.80.0")
45 if self.options.with_libcurl or self.options.with_openssl:
46 self.requires("openssl/1.1.1q")
47
48 def validate(self):
49 if self.info.settings.compiler.cppstd:
50 check_min_cppstd(self, 11)
51 if self.info.options.with_libcurl:
52 if not self.info.options.with_openssl:
53 raise ConanInvalidConfiguration(f"{self.name} requires with_openssl=True when using with_libcurl=True")
54 if self.dependencies["libcurl"].options.with_ssl != "openssl":
55 raise ConanInvalidConfiguration(f"{self.name} requires libcurl:with_ssl='openssl' to be enabled")
56 if not self.dependencies["libcurl"].options.with_zlib:
57 raise ConanInvalidConfiguration(f"{self.name} requires libcurl:with_zlib=True to be enabled")
58
59 def layout(self):
60 cmake_layout(self, src_folder="src")
61
62 def source(self):
63 get(self, **self.conan_data["sources"][self.version],
64 destination=self.source_folder, strip_root=True)
65
66 def generate(self):
67 tc = CMakeToolchain(self)
68 if is_msvc(self):
69 tc.variables["STATIC_VS_CRT"] = is_msvc_static_runtime(self)
70 tc.variables["WITH_LIBCURL"] = self.options.with_libcurl
71 tc.variables["WITH_OPENSSL"] = self.options.with_openssl
72 tc.generate()
73 cd = CMakeDeps(self)
74 cd.generate()
75
76 def build(self):
77 cmake = CMake(self)
78 cmake.configure()
79 cmake.build()
80
81 def package(self):
82 copy(self, "LICENSE.md", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
83 cmake = CMake(self)
84 cmake.install()
85
86 def package_info(self):
87 self.cpp_info.set_property("cmake_file_name", "cocoyaxi")
88 self.cpp_info.set_property("cmake_target_name", "cocoyaxi::co")
89 # TODO: back to global scope in conan v2 once legacy generators removed
90 self.cpp_info.components["co"].libs = ["co"]
91
92 # TODO: to remove in conan v2 once legacy generators removed
93 self.cpp_info.components["co"].set_property("cmake_target_name", "cocoyaxi::co")
94 if self.options.with_libcurl:
95 self.cpp_info.components["co"].requires.append("libcurl::libcurl")
96 if self.options.with_libcurl or self.options.with_openssl:
97 self.cpp_info.components["co"].requires.append("openssl::openssl")
98
[end of recipes/cocoyaxi/all/conanfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/recipes/cocoyaxi/all/conanfile.py b/recipes/cocoyaxi/all/conanfile.py
--- a/recipes/cocoyaxi/all/conanfile.py
+++ b/recipes/cocoyaxi/all/conanfile.py
@@ -30,6 +30,7 @@
"with_libcurl": False,
"with_openssl": False,
}
+ deprecated = "coost"
def config_options(self):
if self.settings.os == "Windows":
@@ -37,13 +38,13 @@
def configure(self):
if self.options.shared:
- del self.options.fPIC
+ self.options.rm_safe("fPIC")
def requirements(self):
if self.options.with_libcurl:
- self.requires("libcurl/7.80.0")
+ self.requires("libcurl/8.2.1")
if self.options.with_libcurl or self.options.with_openssl:
- self.requires("openssl/1.1.1q")
+ self.requires("openssl/[>=1.1 <4]")
def validate(self):
if self.info.settings.compiler.cppstd:
@@ -95,3 +96,7 @@
self.cpp_info.components["co"].requires.append("libcurl::libcurl")
if self.options.with_libcurl or self.options.with_openssl:
self.cpp_info.components["co"].requires.append("openssl::openssl")
+ if self.settings.os in ["Linux", "FreeBSD"]:
+ self.cpp_info.components["co"].system_libs.extend(["pthread", "dl", "m"])
+ elif self.settings.os == "Windows":
+ self.cpp_info.components["co"].system_libs.append("ws2_32")
| {"golden_diff": "diff --git a/recipes/cocoyaxi/all/conanfile.py b/recipes/cocoyaxi/all/conanfile.py\n--- a/recipes/cocoyaxi/all/conanfile.py\n+++ b/recipes/cocoyaxi/all/conanfile.py\n@@ -30,6 +30,7 @@\n \"with_libcurl\": False,\n \"with_openssl\": False,\n }\n+ deprecated = \"coost\"\n \n def config_options(self):\n if self.settings.os == \"Windows\":\n@@ -37,13 +38,13 @@\n \n def configure(self):\n if self.options.shared:\n- del self.options.fPIC\n+ self.options.rm_safe(\"fPIC\")\n \n def requirements(self):\n if self.options.with_libcurl:\n- self.requires(\"libcurl/7.80.0\")\n+ self.requires(\"libcurl/8.2.1\")\n if self.options.with_libcurl or self.options.with_openssl:\n- self.requires(\"openssl/1.1.1q\")\n+ self.requires(\"openssl/[>=1.1 <4]\")\n \n def validate(self):\n if self.info.settings.compiler.cppstd:\n@@ -95,3 +96,7 @@\n self.cpp_info.components[\"co\"].requires.append(\"libcurl::libcurl\")\n if self.options.with_libcurl or self.options.with_openssl:\n self.cpp_info.components[\"co\"].requires.append(\"openssl::openssl\")\n+ if self.settings.os in [\"Linux\", \"FreeBSD\"]:\n+ self.cpp_info.components[\"co\"].system_libs.extend([\"pthread\", \"dl\", \"m\"])\n+ elif self.settings.os == \"Windows\":\n+ self.cpp_info.components[\"co\"].system_libs.append(\"ws2_32\")\n", "issue": "cocoyaxi: make deprecated\nSpecify library name and version: **cocoyaxi/***\r\n\r\n---\r\n\r\n- [x] I've read the [contributing guidelines](https://github.com/conan-io/conan-center-index/blob/master/CONTRIBUTING.md).\r\n- [ ] I've used a [recent](https://github.com/conan-io/conan/releases/latest) Conan client version close to the [currently deployed](https://github.com/conan-io/conan-center-index/blob/master/.c3i/config_v1.yml#L6).\r\n- [ ] I've tried at least one configuration locally with the [conan-center hook](https://github.com/conan-io/hooks.git) activated.\r\n\n", "before_files": [{"content": "from conan import ConanFile\nfrom conan.errors import ConanInvalidConfiguration\nfrom conan.tools.build import check_min_cppstd\nfrom conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout\nfrom conan.tools.files import copy, get\nfrom conan.tools.microsoft import is_msvc, is_msvc_static_runtime\nimport os\n\nrequired_conan_version = \">=1.50.0\"\n\n\nclass CocoyaxiConan(ConanFile):\n name = \"cocoyaxi\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/idealvin/cocoyaxi\"\n license = \"MIT\"\n description = \"A go-style coroutine library in C++11 and more.\"\n topics = (\"cocoyaxi\", \"coroutine\", \"c++11\")\n\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"with_libcurl\": [True, False],\n \"with_openssl\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n \"with_libcurl\": False,\n \"with_openssl\": False,\n }\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n\n def requirements(self):\n if self.options.with_libcurl:\n self.requires(\"libcurl/7.80.0\")\n if self.options.with_libcurl or self.options.with_openssl:\n self.requires(\"openssl/1.1.1q\")\n\n def validate(self):\n if self.info.settings.compiler.cppstd:\n check_min_cppstd(self, 11)\n if self.info.options.with_libcurl:\n if not self.info.options.with_openssl:\n raise ConanInvalidConfiguration(f\"{self.name} requires with_openssl=True when using 
with_libcurl=True\")\n if self.dependencies[\"libcurl\"].options.with_ssl != \"openssl\":\n raise ConanInvalidConfiguration(f\"{self.name} requires libcurl:with_ssl='openssl' to be enabled\")\n if not self.dependencies[\"libcurl\"].options.with_zlib:\n raise ConanInvalidConfiguration(f\"{self.name} requires libcurl:with_zlib=True to be enabled\")\n\n def layout(self):\n cmake_layout(self, src_folder=\"src\")\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version],\n destination=self.source_folder, strip_root=True)\n\n def generate(self):\n tc = CMakeToolchain(self)\n if is_msvc(self):\n tc.variables[\"STATIC_VS_CRT\"] = is_msvc_static_runtime(self)\n tc.variables[\"WITH_LIBCURL\"] = self.options.with_libcurl\n tc.variables[\"WITH_OPENSSL\"] = self.options.with_openssl\n tc.generate()\n cd = CMakeDeps(self)\n cd.generate()\n\n def build(self):\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n\n def package(self):\n copy(self, \"LICENSE.md\", src=self.source_folder, dst=os.path.join(self.package_folder, \"licenses\"))\n cmake = CMake(self)\n cmake.install()\n\n def package_info(self):\n self.cpp_info.set_property(\"cmake_file_name\", \"cocoyaxi\")\n self.cpp_info.set_property(\"cmake_target_name\", \"cocoyaxi::co\")\n # TODO: back to global scope in conan v2 once legacy generators removed\n self.cpp_info.components[\"co\"].libs = [\"co\"]\n\n # TODO: to remove in conan v2 once legacy generators removed\n self.cpp_info.components[\"co\"].set_property(\"cmake_target_name\", \"cocoyaxi::co\")\n if self.options.with_libcurl:\n self.cpp_info.components[\"co\"].requires.append(\"libcurl::libcurl\")\n if self.options.with_libcurl or self.options.with_openssl:\n self.cpp_info.components[\"co\"].requires.append(\"openssl::openssl\")\n", "path": "recipes/cocoyaxi/all/conanfile.py"}]} | 1,786 | 385 |
gh_patches_debug_11424 | rasdani/github-patches | git_diff | cobbler__cobbler-3552 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SafeConfigParser was removed in 3.12
### Describe the bug
From https://docs.python.org/3.12/whatsnew/3.12.html:
Several names deprecated in the configparser way back in 3.2 have been removed per gh-89336:
- configparser.ParsingError no longer has a filename attribute or argument. Use the source attribute and argument instead.
- configparser no longer has a SafeConfigParser class. Use the shorter ConfigParser name instead.
- configparser.ConfigParser no longer has a readfp method. Use read_file() instead.
<!--- HINT: You can paste gist.github.com links for long logs or larger files -->
### Steps to reproduce
1. systemctl start cobblerd
### Expected behavior
<!--- A clear and concise description of what you expected to happen. -->
### Cobbler version
<!--- Paste output from `cobbler version` -->
````paste below
Cobbler 3.4.0
````
### Operating system
Fedora 39
### Cobbler log
<!--- Paste (partial) output from `/var/log/cobbler/cobbler.log` -->
````paste below
cobblerd[1614]: INFO | Exception raised when loading module authorization.configfile
cobblerd[1614]: INFO | Exception occurred: <class 'ImportError'>
cobblerd[1614]: INFO | Exception value: cannot import name 'SafeConfigParser' from 'configparser' (/usr/lib64/python3.12/configparser.py)
````
### Screenshots
<!--- If applicable, add screenshots to help explain your problem. -->
### Additional information
<!--- Add any other context about the problem here. -->
</issue>
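The migration itself is mechanical: `ConfigParser` has been the documented drop-in replacement for `SafeConfigParser` since Python 3.2. A minimal sketch of the affected parsing logic with the non-deprecated class (the helper name is illustrative, and it assumes the `users.conf` format is unchanged):

```python
from configparser import ConfigParser  # SafeConfigParser no longer exists in Python 3.12


def parse_users_conf(path: str) -> dict:
    """Collect every option of every section, mirroring the module's __parse_config()."""
    # ConfigParser behaves the same as the removed SafeConfigParser for this
    # simple INI-style file: read(), sections() and options() are unchanged.
    config = ConfigParser()
    config.read(path)
    return {group: {option: 1 for option in config.options(group)}
            for group in config.sections()}
```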
<code>
[start of cobbler/modules/authorization/configfile.py]
1 """
2 Authorization module that allow users listed in
3 /etc/cobbler/users.conf to be permitted to access resources.
4 For instance, when using authz_ldap, you want to use authn_configfile,
5 not authz_allowall, which will most likely NOT do what you want.
6 """
7 # SPDX-License-Identifier: GPL-2.0-or-later
8 # SPDX-FileCopyrightText: Copyright 2007-2009, Red Hat, Inc and Others
9 # SPDX-FileCopyrightText: Michael DeHaan <michael.dehaan AT gmail>
10
11
12 import os
13 from configparser import SafeConfigParser
14 from typing import TYPE_CHECKING, Any, Dict
15
16 if TYPE_CHECKING:
17 from cobbler.api import CobblerAPI
18
19
20 CONFIG_FILE = "/etc/cobbler/users.conf"
21
22
23 def register() -> str:
24 """
25 The mandatory Cobbler module registration hook.
26
27 :return: Always "authz".
28 """
29 return "authz"
30
31
32 def __parse_config() -> Dict[str, Dict[Any, Any]]:
33 """
34 Parse the the users.conf file.
35
36 :return: The data of the config file.
37 """
38 if not os.path.exists(CONFIG_FILE):
39 return {}
40 config = SafeConfigParser()
41 config.read(CONFIG_FILE)
42 alldata: Dict[str, Dict[str, Any]] = {}
43 groups = config.sections()
44 for group in groups:
45 alldata[str(group)] = {}
46 options = config.options(group)
47 for option in options:
48 alldata[group][option] = 1
49 return alldata
50
51
52 def authorize(
53 api_handle: "CobblerAPI",
54 user: str,
55 resource: str,
56 arg1: Any = None,
57 arg2: Any = None,
58 ) -> int:
59 """
60 Validate a user against a resource. All users in the file are permitted by this module.
61
62 :param api_handle: This parameter is not used currently.
63 :param user: The user to authorize.
64 :param resource: This parameter is not used currently.
65 :param arg1: This parameter is not used currently.
66 :param arg2: This parameter is not used currently.
67 :return: "0" if no authorized, "1" if authorized.
68 """
69 # FIXME: this must be modified to use the new ACL engine
70
71 data = __parse_config()
72 for _, group_data in data.items():
73 if user.lower() in group_data:
74 return 1
75 return 0
76
[end of cobbler/modules/authorization/configfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cobbler/modules/authorization/configfile.py b/cobbler/modules/authorization/configfile.py
--- a/cobbler/modules/authorization/configfile.py
+++ b/cobbler/modules/authorization/configfile.py
@@ -10,7 +10,7 @@
import os
-from configparser import SafeConfigParser
+from configparser import ConfigParser
from typing import TYPE_CHECKING, Any, Dict
if TYPE_CHECKING:
@@ -37,7 +37,7 @@
"""
if not os.path.exists(CONFIG_FILE):
return {}
- config = SafeConfigParser()
+ config = ConfigParser()
config.read(CONFIG_FILE)
alldata: Dict[str, Dict[str, Any]] = {}
groups = config.sections()
| {"golden_diff": "diff --git a/cobbler/modules/authorization/configfile.py b/cobbler/modules/authorization/configfile.py\n--- a/cobbler/modules/authorization/configfile.py\n+++ b/cobbler/modules/authorization/configfile.py\n@@ -10,7 +10,7 @@\n \n \n import os\n-from configparser import SafeConfigParser\n+from configparser import ConfigParser\n from typing import TYPE_CHECKING, Any, Dict\n \n if TYPE_CHECKING:\n@@ -37,7 +37,7 @@\n \"\"\"\n if not os.path.exists(CONFIG_FILE):\n return {}\n- config = SafeConfigParser()\n+ config = ConfigParser()\n config.read(CONFIG_FILE)\n alldata: Dict[str, Dict[str, Any]] = {}\n groups = config.sections()\n", "issue": "SafeConfigParser was removed in 3.12\n### Describe the bug\r\n\r\nFrom https://docs.python.org/3.12/whatsnew/3.12.html:\r\n\r\nSeveral names deprecated in the configparser way back in 3.2 have been removed per gh-89336:\r\n- configparser.ParsingError no longer has a filename attribute or argument. Use the source attribute and argument instead.\r\n- configparser no longer has a SafeConfigParser class. Use the shorter ConfigParser name instead.\r\n- configparser.ConfigParser no longer has a readfp method. Use read_file() instead.\r\n\r\n\r\n<!--- HINT: You can paste gist.github.com links for long logs or larger files -->\r\n\r\n### Steps to reproduce\r\n\r\n1. systemctl start cobblerd\r\n\r\n### Expected behavior\r\n\r\n<!--- A clear and concise description of what you expected to happen. -->\r\n\r\n### Cobbler version\r\n\r\n<!--- Paste output from `cobbler version` -->\r\n````paste below\r\nCobbler 3.4.0\r\n````\r\n\r\n### Operating system\r\n\r\nFedora 39\r\n\r\n### Cobbler log\r\n\r\n<!--- Paste (partial) output from `/var/log/cobbler/cobbler.log` -->\r\n````paste below\r\ncobblerd[1614]: INFO | Exception raised when loading module authorization.configfile\r\ncobblerd[1614]: INFO | Exception occurred: <class 'ImportError'>\r\ncobblerd[1614]: INFO | Exception value: cannot import name 'SafeConfigParser' from 'configparser' (/usr/lib64/python3.12/configparser.py)\r\n````\r\n\r\n### Screenshots\r\n\r\n<!--- If applicable, add screenshots to help explain your problem. -->\r\n\r\n### Additional information\r\n\r\n<!--- Add any other context about the problem here. 
-->\r\n\n", "before_files": [{"content": "\"\"\"\nAuthorization module that allow users listed in\n/etc/cobbler/users.conf to be permitted to access resources.\nFor instance, when using authz_ldap, you want to use authn_configfile,\nnot authz_allowall, which will most likely NOT do what you want.\n\"\"\"\n# SPDX-License-Identifier: GPL-2.0-or-later\n# SPDX-FileCopyrightText: Copyright 2007-2009, Red Hat, Inc and Others\n# SPDX-FileCopyrightText: Michael DeHaan <michael.dehaan AT gmail>\n\n\nimport os\nfrom configparser import SafeConfigParser\nfrom typing import TYPE_CHECKING, Any, Dict\n\nif TYPE_CHECKING:\n from cobbler.api import CobblerAPI\n\n\nCONFIG_FILE = \"/etc/cobbler/users.conf\"\n\n\ndef register() -> str:\n \"\"\"\n The mandatory Cobbler module registration hook.\n\n :return: Always \"authz\".\n \"\"\"\n return \"authz\"\n\n\ndef __parse_config() -> Dict[str, Dict[Any, Any]]:\n \"\"\"\n Parse the the users.conf file.\n\n :return: The data of the config file.\n \"\"\"\n if not os.path.exists(CONFIG_FILE):\n return {}\n config = SafeConfigParser()\n config.read(CONFIG_FILE)\n alldata: Dict[str, Dict[str, Any]] = {}\n groups = config.sections()\n for group in groups:\n alldata[str(group)] = {}\n options = config.options(group)\n for option in options:\n alldata[group][option] = 1\n return alldata\n\n\ndef authorize(\n api_handle: \"CobblerAPI\",\n user: str,\n resource: str,\n arg1: Any = None,\n arg2: Any = None,\n) -> int:\n \"\"\"\n Validate a user against a resource. All users in the file are permitted by this module.\n\n :param api_handle: This parameter is not used currently.\n :param user: The user to authorize.\n :param resource: This parameter is not used currently.\n :param arg1: This parameter is not used currently.\n :param arg2: This parameter is not used currently.\n :return: \"0\" if no authorized, \"1\" if authorized.\n \"\"\"\n # FIXME: this must be modified to use the new ACL engine\n\n data = __parse_config()\n for _, group_data in data.items():\n if user.lower() in group_data:\n return 1\n return 0\n", "path": "cobbler/modules/authorization/configfile.py"}]} | 1,595 | 161 |
gh_patches_debug_19861 | rasdani/github-patches | git_diff | doccano__doccano-1958 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug report] Static files are not copied on pip installation
How to reproduce the behaviour
---------
Seems like (some?) static files are not copied on pip installation.
For instance `http://site.com/favicon.ico` is available on Docker Compose installation. But it is 404'd on pip installation.
Your Environment
---------
<!-- Include details of your environment.-->
* Operating System: CentOS 8.3
* Python Version Used: 3.8.8
* When you install doccano: v1.2.1
* How did you install doccano (Heroku button etc): pip
</issue>
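In a pip (standalone) deployment there is no nginx layer in front of Django to hand out static assets, so `favicon.ico` simply has no route. One possible fix, in line with the patch applied later in this entry, is to serve it through `django.views.static.serve`; the exact location of the built frontend assets (`client/dist` here) is an assumption:

```python
from pathlib import Path

from django.urls import path
from django.views.static import serve

# Assumed location of the built frontend assets relative to the backend package.
static_dir = Path(__file__).resolve().parent.parent / "client" / "dist"

urlpatterns = [
    # Serve the favicon directly when Django itself is the web server
    # (pip / standalone installs), instead of relying on a reverse proxy.
    path("favicon.ico", serve, {"document_root": static_dir, "path": "favicon.ico"}),
]
```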
<code>
[start of backend/config/urls.py]
1 """app URL Configuration
2
3 The `urlpatterns` list routes URLs to views. For more information please see:
4 https://docs.djangoproject.com/en/2.0/topics/http/urls/
5 Examples:
6 Function views
7 1. Add an import: from my_app import views
8 2. Add a URL to urlpatterns: path('', views.home, name='home')
9 Class-based views
10 1. Add an import: from other_app.views import Home
11 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
12 Including another URLconf
13 1. Import the include() function: from django.urls import include, path
14 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
15 """
16 import os
17 import re
18
19 from django.conf import settings
20 from django.contrib import admin
21 from django.contrib.auth.views import TemplateView
22 from django.urls import include, path, re_path
23 from django.views.static import serve
24 from drf_yasg import openapi
25 from drf_yasg.views import get_schema_view
26
27 schema_view = get_schema_view(
28 openapi.Info(
29 title="doccano API",
30 default_version="v1",
31 description="doccano API description",
32 license=openapi.License(name="MIT License"),
33 ),
34 public=True,
35 )
36
37 urlpatterns = []
38 if settings.DEBUG or os.environ.get("STANDALONE", False):
39 # For showing images and audios in the case of pip and Docker.
40 urlpatterns.append(
41 re_path(
42 r"^%s(?P<path>.*)$" % re.escape(settings.MEDIA_URL.lstrip("/")),
43 serve,
44 {"document_root": settings.MEDIA_ROOT},
45 )
46 )
47
48 urlpatterns += [
49 path("admin/", admin.site.urls),
50 path("api-auth/", include("rest_framework.urls")),
51 path("v1/health/", include("health_check.urls")),
52 path("v1/", include("api.urls")),
53 path("v1/", include("roles.urls")),
54 path("v1/", include("users.urls")),
55 path("v1/", include("data_import.urls")),
56 path("v1/", include("data_export.urls")),
57 path("v1/", include("projects.urls")),
58 path("v1/projects/<int:project_id>/metrics/", include("metrics.urls")),
59 path("v1/projects/<int:project_id>/", include("auto_labeling.urls")),
60 path("v1/projects/<int:project_id>/", include("examples.urls")),
61 path("v1/projects/<int:project_id>/", include("labels.urls")),
62 path("v1/projects/<int:project_id>/", include("label_types.urls")),
63 path("swagger/", schema_view.with_ui("swagger", cache_timeout=0), name="schema-swagger-ui"),
64 re_path("", TemplateView.as_view(template_name="index.html")),
65 ]
66
[end of backend/config/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/backend/config/urls.py b/backend/config/urls.py
--- a/backend/config/urls.py
+++ b/backend/config/urls.py
@@ -15,6 +15,7 @@
"""
import os
import re
+from pathlib import Path
from django.conf import settings
from django.contrib import admin
@@ -36,6 +37,7 @@
urlpatterns = []
if settings.DEBUG or os.environ.get("STANDALONE", False):
+ static_dir = Path(__file__).resolve().parent.parent / "client" / "dist"
# For showing images and audios in the case of pip and Docker.
urlpatterns.append(
re_path(
@@ -44,6 +46,8 @@
{"document_root": settings.MEDIA_ROOT},
)
)
+ # For showing favicon on the case of pip and Docker.
+ urlpatterns.append(path("favicon.ico", serve, {"document_root": static_dir, "path": "favicon.ico"}))
urlpatterns += [
path("admin/", admin.site.urls),
| {"golden_diff": "diff --git a/backend/config/urls.py b/backend/config/urls.py\n--- a/backend/config/urls.py\n+++ b/backend/config/urls.py\n@@ -15,6 +15,7 @@\n \"\"\"\n import os\n import re\n+from pathlib import Path\n \n from django.conf import settings\n from django.contrib import admin\n@@ -36,6 +37,7 @@\n \n urlpatterns = []\n if settings.DEBUG or os.environ.get(\"STANDALONE\", False):\n+ static_dir = Path(__file__).resolve().parent.parent / \"client\" / \"dist\"\n # For showing images and audios in the case of pip and Docker.\n urlpatterns.append(\n re_path(\n@@ -44,6 +46,8 @@\n {\"document_root\": settings.MEDIA_ROOT},\n )\n )\n+ # For showing favicon on the case of pip and Docker.\n+ urlpatterns.append(path(\"favicon.ico\", serve, {\"document_root\": static_dir, \"path\": \"favicon.ico\"}))\n \n urlpatterns += [\n path(\"admin/\", admin.site.urls),\n", "issue": "[Bug report] Static files are not copied on pip installation\nHow to reproduce the behaviour\r\n---------\r\nSeems like (some?) static files are not copied on pip installation.\r\nFor instance `http://site.com/favicon.ico` is available on Docker Compose installation. But it is 404'd on pip installation.\r\n\r\nYour Environment\r\n---------\r\n<!-- Include details of your environment.-->\r\n* Operating System: CentOS 8.3\r\n* Python Version Used: 3.8.8\r\n* When you install doccano: v1.2.1\r\n* How did you install doccano (Heroku button etc): pip\r\n\n", "before_files": [{"content": "\"\"\"app URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nimport os\nimport re\n\nfrom django.conf import settings\nfrom django.contrib import admin\nfrom django.contrib.auth.views import TemplateView\nfrom django.urls import include, path, re_path\nfrom django.views.static import serve\nfrom drf_yasg import openapi\nfrom drf_yasg.views import get_schema_view\n\nschema_view = get_schema_view(\n openapi.Info(\n title=\"doccano API\",\n default_version=\"v1\",\n description=\"doccano API description\",\n license=openapi.License(name=\"MIT License\"),\n ),\n public=True,\n)\n\nurlpatterns = []\nif settings.DEBUG or os.environ.get(\"STANDALONE\", False):\n # For showing images and audios in the case of pip and Docker.\n urlpatterns.append(\n re_path(\n r\"^%s(?P<path>.*)$\" % re.escape(settings.MEDIA_URL.lstrip(\"/\")),\n serve,\n {\"document_root\": settings.MEDIA_ROOT},\n )\n )\n\nurlpatterns += [\n path(\"admin/\", admin.site.urls),\n path(\"api-auth/\", include(\"rest_framework.urls\")),\n path(\"v1/health/\", include(\"health_check.urls\")),\n path(\"v1/\", include(\"api.urls\")),\n path(\"v1/\", include(\"roles.urls\")),\n path(\"v1/\", include(\"users.urls\")),\n path(\"v1/\", include(\"data_import.urls\")),\n path(\"v1/\", include(\"data_export.urls\")),\n path(\"v1/\", include(\"projects.urls\")),\n path(\"v1/projects/<int:project_id>/metrics/\", include(\"metrics.urls\")),\n path(\"v1/projects/<int:project_id>/\", include(\"auto_labeling.urls\")),\n path(\"v1/projects/<int:project_id>/\", include(\"examples.urls\")),\n path(\"v1/projects/<int:project_id>/\", include(\"labels.urls\")),\n path(\"v1/projects/<int:project_id>/\", include(\"label_types.urls\")),\n path(\"swagger/\", schema_view.with_ui(\"swagger\", cache_timeout=0), name=\"schema-swagger-ui\"),\n re_path(\"\", TemplateView.as_view(template_name=\"index.html\")),\n]\n", "path": "backend/config/urls.py"}]} | 1,397 | 223 |
gh_patches_debug_5649 | rasdani/github-patches | git_diff | vaexio__vaex-1268 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[FEATURE-REQUEST] Read files from ADLS Gen 2 (Cloud support for Azure Data Lake)
Hello,
in addition to support for S3 and GCS, it'd be awesome if it'd also be possible to read files directly from ADLS Gen 2, since some of us are restricted in the choice of cloud platforms we can use.
Thanks,
</issue>
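For context, the file plumbing below already accepts any fsspec filesystem through the `fs=` keyword (see `parse()`, which wraps it in a `pyarrow.fs.FSSpecHandler`), so ADLS Gen 2 access can be sketched on top of the `adlfs` package, which registers the `abfs` protocol with fsspec. The credentials are placeholders, and exposing `fs=` at the `vaex.open()` level is assumed here:

```python
import fsspec
import vaex

# Assumes the optional adlfs package is installed; it provides the "abfs"
# (Azure Blob Storage / ADLS Gen 2) filesystem that fsspec dispatches to.
fs = fsspec.filesystem(
    "abfs",
    account_name="myaccount",  # placeholder credentials
    account_key="...",
)

# The fsspec filesystem is handed to vaex and wrapped internally in a
# pyarrow FSSpecHandler (see parse() in the listing below).
df = vaex.open("my-container/path/to/data.parquet", fs=fs)
```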
<code>
[start of packages/vaex-core/vaex/file/__init__.py]
1 __author__ = 'breddels'
2 import pathlib
3 import logging
4 from glob import glob as local_glob
5 import io
6 import os
7 import re
8 import sys
9 from urllib.parse import parse_qs
10 import warnings
11 import pkg_resources
12
13 import pyarrow as pa
14 import pyarrow.fs
15
16 import vaex.file.cache
17
18
19 normal_open = open
20 logger = logging.getLogger("vaex.file")
21
22
23 class FileProxy:
24 '''Wraps a file object, giving it a name a dup() method
 25     '''Wraps a file object, giving it a name and a dup() method
26 The dup is needed since a file is stateful, and needs to be duplicated in threads
27 '''
28 def __init__(self, file, name, dup):
29 self.file = file
30 self.name = name
31 self.dup = dup
32 self.closed = False
33
34 def __iter__(self):
35 raise NotImplementedError('This is just for looking like a file object to Pandas')
36
37 def write(self, *args):
38 return self.file.write(*args)
39
40 def read(self, *args):
41 return self.file.read(*args)
42
43 def seek(self, *args):
44 return self.file.seek(*args)
45
46 def readinto(self, *args):
47 return self.file.readinto(*args)
48
49 def tell(self):
50 return self.file.tell()
51
52 def close(self):
53 self.closed = True
54 return self.file.close()
55
56 def __enter__(self, *args):
57 return self
58
59 def __exit__(self, *args):
60 self.file.close()
61
62
63 def is_file_object(file):
64 return hasattr(file, 'read') and hasattr(file, 'seek')
65
66
67 def file_and_path(file, mode='r', fs_options={}, fs=None):
68 if is_file_object(file):
69 return file, stringyfy(file)
70 else:
71 file = open(file, mode=mode, fs_options=fs_options, fs=fs)
72 return file, stringyfy(file)
73
74
75 def is_path_like(path):
76 try:
77 stringyfy(path)
78 return True
79 except ValueError:
80 return False
81
82
83 def stringyfy(path):
84 """Get string from path like object of file like object
85
86 >>> import sys, pytest
87 >>> if sys.platform.startswith('win'):
88 ... pytest.skip('this doctest does not work on Windows')
89 ...
90 >>> stringyfy('/tmp/test')
91 '/tmp/test'
92 >>> from pathlib import Path
93 >>> stringyfy(Path('/tmp/test'))
94 '/tmp/test'
95 """
96 try:
97 # Pathlib support
98 path = path.__fspath__()
99 except AttributeError:
100 pass
101 if hasattr(path, 'name'): # passed in a file
102 path = path.name
103 if isinstance(path, str):
104 return path
105 raise ValueError(f'Cannot convert {path} to a path')
106
107
108 def split_scheme(path):
109 path = stringyfy(path)
110 if '://' in path:
111 scheme, path = path[:path.index('://')], path[path.index('://')+3:]
112 else:
113 scheme = None
114 return scheme, path
115
116
117 def memory_mappable(path):
118 path = stringyfy(path)
119 scheme, _ = split_scheme(path)
120 return scheme is None
121
122
123 def split_options(path, fs_options={}):
124 if isinstance(path, list):
125 paths = []
126 previous_options = None
127 for path in path:
128 path, options = split_options(path, fs_options)
129 if previous_options is not None:
130 if previous_options != options:
131 raise ValueError(f'Inconsistent set of fs_options given: {previous_options} {options}')
132 else:
133 previous_options = options
134 paths.append(path)
135 return paths, previous_options
136 path = stringyfy(path)
137 match = re.match(r'(.*?)\?((&?[^=&?]+=[^=&?]+)+)', path)
138 if match:
139 naked_path, query = match.groups()[:2]
140 else:
141 naked_path = path
142 query = ''
143 options = fs_options.copy()
144 options.update({key: values[0] for key, values in parse_qs(query).items()})
145 return naked_path, options
146
147
148 def split_ext(path, fs_options={}):
149 path, fs_options = split_options(path, fs_options=fs_options)
150 base, ext = os.path.splitext(path)
151 return base, ext, fs_options
152
153
154 def exists(path, fs_options={}, fs=None):
155 """Checks if file exists.
156
157 >>> vaex.file.exists('/you/do/not')
158 False
159
160 >>> vaex.file.exists('s3://vaex/taxi/nyc_taxi_2015_mini.parquet', fs_options={'anon': True})
161 True
162 """
163 fs, path = parse(path, fs_options=fs_options, fs=fs)
164 if fs is None:
165 return os.path.exists(path)
166 else:
167 return fs.get_file_info([path])[0].type != pa.fs.FileType.NotFound
168
169
170 def _get_scheme_handler(path):
171 scheme, _ = split_scheme(path)
172 for entry in pkg_resources.iter_entry_points(group='vaex.file.scheme'):
173 if entry.name == scheme:
174 return entry.load()
175 raise ValueError(f'Do not know how to open {path}, no handler for {scheme} is known')
176
177
178 def remove(path):
179 scheme, path = split_scheme(path)
180 if scheme:
181 raise ValueError('Cannot delete non-local files yet')
182 os.remove(path)
183
184
185 def parse(path, fs_options={}, fs=None, for_arrow=False):
186 if fs is not None:
187 if fs_options:
188 warnings.warn(f'Passed fs_options while fs was specified, {fs_options} are ignored')
189 if 'fsspec' in sys.modules:
190 import fsspec
191 if isinstance(fs, fsspec.AbstractFileSystem):
192 fs = pa.fs.FSSpecHandler(fs)
193 return fs, path
194 if isinstance(path, (list, tuple)):
195 scheme, _ = split_scheme(path[0])
196 else:
197 scheme, _ = split_scheme(path)
198 if not scheme:
199 return None, path
200 if isinstance(path, (list, tuple)):
201 module = _get_scheme_handler(path[0])
202 return module.parse(path[0], fs_options, for_arrow=for_arrow)[0], path
203 else:
204 module = _get_scheme_handler(path)
205 return module.parse(path, fs_options, for_arrow=for_arrow)
206
207
208 def create_dir(path, fs_options, fs=None):
209 fs, path = parse(path, fs_options=fs_options, fs=fs)
210 if fs is None:
211 fs = pa.fs.LocalFileSystem()
212 fs.create_dir(path, recursive=True)
213
214
215 def fingerprint(path, fs_options={}, fs=None):
216 """Deterministic fingerprint for a file, useful in combination with dask or detecting file changes.
217
218 Based on mtime (modification time), file size, and the path. May lead to
219 false negative if the path changes, but not the content.
220
221 >>> fingerprint('/data/taxi.parquet') # doctest: +SKIP
222 '0171ec50cb2cf71b8e4f813212063a19'
223
224 >>> fingerprint('s3://vaex/taxi/nyc_taxi_2015_mini.parquet', fs_options={'anon': True}) # doctest: +SKIP
225 '7c962e2d8c21b6a3681afb682d3bf91b'
226 """
227 fs, path = parse(path, fs_options, fs=fs)
228 path = stringyfy(path)
229 if fs is None:
230 mtime = os.path.getmtime(path)
231 size = os.path.getsize(path)
232 else:
233 info = fs.get_file_info([path])[0]
234 mtime = info.mtime_ns
235 size = info.size
236 import vaex.cache
237 return vaex.cache.fingerprint(('file', (path, mtime, size)))
238
239
240 def open(path, mode='rb', fs_options={}, fs=None, for_arrow=False, mmap=False, encoding="utf8"):
241 if is_file_object(path):
242 return path
243 fs, path = parse(path, fs_options=fs_options, fs=fs, for_arrow=for_arrow)
244 if fs is None:
245 path = stringyfy(path)
246 if for_arrow:
247 if fs_options:
248 raise ValueError(f'fs_options not supported for local files. You passed: {repr(fs_options)}.')
249 if mmap:
250 return pa.memory_map(path, mode)
251 else:
252 return pa.OSFile(path, mode)
253 else:
254 if 'b' not in mode:
255 return normal_open(path, mode, encoding=encoding)
256 else:
257 return normal_open(path, mode)
258 if mode == 'rb':
259 def create():
260 return fs.open_input_file(path)
261 elif mode == "r":
262 def create():
263 return io.TextIOWrapper(fs.open_input_file(path), encoding=encoding)
264 elif mode == 'wb':
265 def create():
266 return fs.open_output_stream(path)
267 elif mode == "w":
268 def create():
269 return io.TextIOWrapper(fs.open_output_stream(path), encoding=encoding)
270 else:
271 raise ValueError(f'Only mode=rb/bw/r/w are supported, not {mode}')
272 return FileProxy(create(), path, create)
273
274
275 def dup(file):
276 """Duplicate a file like object, s3 or cached file supported"""
277 if isinstance(file, (vaex.file.cache.CachedFile, FileProxy)):
278 return file.dup()
279 else:
280 return normal_open(file.name, file.mode)
281
282 def glob(path, fs_options={}, fs=None):
283 if fs:
284 raise ValueError('globbing with custom fs not supported yet, please open an issue.')
285 scheme, _ = split_scheme(path)
286 if not scheme:
287 return local_glob(path)
288 module = _get_scheme_handler(path)
289 return module.glob(path, fs_options)
290
291
292 def ext(path):
293 path = stringyfy(path)
294 path, options = split_options(path)
295 return os.path.splitext(path)[1]
296
[end of packages/vaex-core/vaex/file/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/packages/vaex-core/vaex/file/__init__.py b/packages/vaex-core/vaex/file/__init__.py
--- a/packages/vaex-core/vaex/file/__init__.py
+++ b/packages/vaex-core/vaex/file/__init__.py
@@ -190,6 +190,8 @@
import fsspec
if isinstance(fs, fsspec.AbstractFileSystem):
fs = pa.fs.FSSpecHandler(fs)
+ if for_arrow:
+ fs = pyarrow.fs.PyFileSystem(fs)
return fs, path
if isinstance(path, (list, tuple)):
scheme, _ = split_scheme(path[0])
| {"golden_diff": "diff --git a/packages/vaex-core/vaex/file/__init__.py b/packages/vaex-core/vaex/file/__init__.py\n--- a/packages/vaex-core/vaex/file/__init__.py\n+++ b/packages/vaex-core/vaex/file/__init__.py\n@@ -190,6 +190,8 @@\n import fsspec\n if isinstance(fs, fsspec.AbstractFileSystem):\n fs = pa.fs.FSSpecHandler(fs)\n+ if for_arrow:\n+ fs = pyarrow.fs.PyFileSystem(fs)\n return fs, path\n if isinstance(path, (list, tuple)):\n scheme, _ = split_scheme(path[0])\n", "issue": "[FEATURE-REQUEST] Read files from ADLS Gen 2 (Cloud support for Azure Data Lake)\nHello,\r\n\r\nin addition to support for S3 and GCS, it'd be awesome if it'd also be possible to read files directly from ADLS Gen 2, since some of us are restricted in the choice of cloud platforms we can use.\r\n\r\nThanks,\n", "before_files": [{"content": "__author__ = 'breddels'\nimport pathlib\nimport logging\nfrom glob import glob as local_glob\nimport io\nimport os\nimport re\nimport sys\nfrom urllib.parse import parse_qs\nimport warnings\nimport pkg_resources\n\nimport pyarrow as pa\nimport pyarrow.fs\n\nimport vaex.file.cache\n\n\nnormal_open = open\nlogger = logging.getLogger(\"vaex.file\")\n\n\nclass FileProxy:\n '''Wraps a file object, giving it a name a dup() method\n\n The dup is needed since a file is stateful, and needs to be duplicated in threads\n '''\n def __init__(self, file, name, dup):\n self.file = file\n self.name = name\n self.dup = dup\n self.closed = False\n\n def __iter__(self):\n raise NotImplementedError('This is just for looking like a file object to Pandas')\n\n def write(self, *args):\n return self.file.write(*args)\n\n def read(self, *args):\n return self.file.read(*args)\n\n def seek(self, *args):\n return self.file.seek(*args)\n\n def readinto(self, *args):\n return self.file.readinto(*args)\n\n def tell(self):\n return self.file.tell()\n\n def close(self):\n self.closed = True\n return self.file.close()\n\n def __enter__(self, *args):\n return self\n\n def __exit__(self, *args):\n self.file.close()\n\n\ndef is_file_object(file):\n return hasattr(file, 'read') and hasattr(file, 'seek')\n\n\ndef file_and_path(file, mode='r', fs_options={}, fs=None):\n if is_file_object(file):\n return file, stringyfy(file)\n else:\n file = open(file, mode=mode, fs_options=fs_options, fs=fs)\n return file, stringyfy(file)\n\n\ndef is_path_like(path):\n try:\n stringyfy(path)\n return True\n except ValueError:\n return False\n\n\ndef stringyfy(path):\n \"\"\"Get string from path like object of file like object\n\n >>> import sys, pytest\n >>> if sys.platform.startswith('win'):\n ... 
pytest.skip('this doctest does not work on Windows')\n ...\n >>> stringyfy('/tmp/test')\n '/tmp/test'\n >>> from pathlib import Path\n >>> stringyfy(Path('/tmp/test'))\n '/tmp/test'\n \"\"\"\n try:\n # Pathlib support\n path = path.__fspath__()\n except AttributeError:\n pass\n if hasattr(path, 'name'): # passed in a file\n path = path.name\n if isinstance(path, str):\n return path\n raise ValueError(f'Cannot convert {path} to a path')\n\n\ndef split_scheme(path):\n path = stringyfy(path)\n if '://' in path:\n scheme, path = path[:path.index('://')], path[path.index('://')+3:]\n else:\n scheme = None\n return scheme, path\n\n\ndef memory_mappable(path):\n path = stringyfy(path)\n scheme, _ = split_scheme(path)\n return scheme is None\n\n\ndef split_options(path, fs_options={}):\n if isinstance(path, list):\n paths = []\n previous_options = None\n for path in path:\n path, options = split_options(path, fs_options)\n if previous_options is not None:\n if previous_options != options:\n raise ValueError(f'Inconsistent set of fs_options given: {previous_options} {options}')\n else:\n previous_options = options\n paths.append(path)\n return paths, previous_options\n path = stringyfy(path)\n match = re.match(r'(.*?)\\?((&?[^=&?]+=[^=&?]+)+)', path)\n if match:\n naked_path, query = match.groups()[:2]\n else:\n naked_path = path\n query = ''\n options = fs_options.copy()\n options.update({key: values[0] for key, values in parse_qs(query).items()})\n return naked_path, options\n\n\ndef split_ext(path, fs_options={}):\n path, fs_options = split_options(path, fs_options=fs_options)\n base, ext = os.path.splitext(path)\n return base, ext, fs_options\n\n\ndef exists(path, fs_options={}, fs=None):\n \"\"\"Checks if file exists.\n\n >>> vaex.file.exists('/you/do/not')\n False\n\n >>> vaex.file.exists('s3://vaex/taxi/nyc_taxi_2015_mini.parquet', fs_options={'anon': True})\n True\n \"\"\"\n fs, path = parse(path, fs_options=fs_options, fs=fs)\n if fs is None:\n return os.path.exists(path)\n else:\n return fs.get_file_info([path])[0].type != pa.fs.FileType.NotFound\n\n\ndef _get_scheme_handler(path):\n scheme, _ = split_scheme(path)\n for entry in pkg_resources.iter_entry_points(group='vaex.file.scheme'):\n if entry.name == scheme:\n return entry.load()\n raise ValueError(f'Do not know how to open {path}, no handler for {scheme} is known')\n\n\ndef remove(path):\n scheme, path = split_scheme(path)\n if scheme:\n raise ValueError('Cannot delete non-local files yet')\n os.remove(path)\n\n\ndef parse(path, fs_options={}, fs=None, for_arrow=False):\n if fs is not None:\n if fs_options:\n warnings.warn(f'Passed fs_options while fs was specified, {fs_options} are ignored')\n if 'fsspec' in sys.modules:\n import fsspec\n if isinstance(fs, fsspec.AbstractFileSystem):\n fs = pa.fs.FSSpecHandler(fs)\n return fs, path\n if isinstance(path, (list, tuple)):\n scheme, _ = split_scheme(path[0])\n else:\n scheme, _ = split_scheme(path)\n if not scheme:\n return None, path\n if isinstance(path, (list, tuple)):\n module = _get_scheme_handler(path[0])\n return module.parse(path[0], fs_options, for_arrow=for_arrow)[0], path\n else:\n module = _get_scheme_handler(path)\n return module.parse(path, fs_options, for_arrow=for_arrow)\n\n\ndef create_dir(path, fs_options, fs=None):\n fs, path = parse(path, fs_options=fs_options, fs=fs)\n if fs is None:\n fs = pa.fs.LocalFileSystem()\n fs.create_dir(path, recursive=True)\n\n\ndef fingerprint(path, fs_options={}, fs=None):\n \"\"\"Deterministic fingerprint for a file, useful in 
combination with dask or detecting file changes.\n\n Based on mtime (modification time), file size, and the path. May lead to\n false negative if the path changes, but not the content.\n\n >>> fingerprint('/data/taxi.parquet') # doctest: +SKIP\n '0171ec50cb2cf71b8e4f813212063a19'\n\n >>> fingerprint('s3://vaex/taxi/nyc_taxi_2015_mini.parquet', fs_options={'anon': True}) # doctest: +SKIP\n '7c962e2d8c21b6a3681afb682d3bf91b'\n \"\"\"\n fs, path = parse(path, fs_options, fs=fs)\n path = stringyfy(path)\n if fs is None:\n mtime = os.path.getmtime(path)\n size = os.path.getsize(path)\n else:\n info = fs.get_file_info([path])[0]\n mtime = info.mtime_ns\n size = info.size\n import vaex.cache\n return vaex.cache.fingerprint(('file', (path, mtime, size)))\n\n\ndef open(path, mode='rb', fs_options={}, fs=None, for_arrow=False, mmap=False, encoding=\"utf8\"):\n if is_file_object(path):\n return path\n fs, path = parse(path, fs_options=fs_options, fs=fs, for_arrow=for_arrow)\n if fs is None:\n path = stringyfy(path)\n if for_arrow:\n if fs_options:\n raise ValueError(f'fs_options not supported for local files. You passed: {repr(fs_options)}.')\n if mmap:\n return pa.memory_map(path, mode)\n else:\n return pa.OSFile(path, mode)\n else:\n if 'b' not in mode:\n return normal_open(path, mode, encoding=encoding)\n else:\n return normal_open(path, mode)\n if mode == 'rb':\n def create():\n return fs.open_input_file(path)\n elif mode == \"r\":\n def create():\n return io.TextIOWrapper(fs.open_input_file(path), encoding=encoding)\n elif mode == 'wb':\n def create():\n return fs.open_output_stream(path)\n elif mode == \"w\":\n def create():\n return io.TextIOWrapper(fs.open_output_stream(path), encoding=encoding)\n else:\n raise ValueError(f'Only mode=rb/bw/r/w are supported, not {mode}')\n return FileProxy(create(), path, create)\n\n\ndef dup(file):\n \"\"\"Duplicate a file like object, s3 or cached file supported\"\"\"\n if isinstance(file, (vaex.file.cache.CachedFile, FileProxy)):\n return file.dup()\n else:\n return normal_open(file.name, file.mode)\n\ndef glob(path, fs_options={}, fs=None):\n if fs:\n raise ValueError('globbing with custom fs not supported yet, please open an issue.')\n scheme, _ = split_scheme(path)\n if not scheme:\n return local_glob(path)\n module = _get_scheme_handler(path)\n return module.glob(path, fs_options)\n\n\ndef ext(path):\n path = stringyfy(path)\n path, options = split_options(path)\n return os.path.splitext(path)[1]\n", "path": "packages/vaex-core/vaex/file/__init__.py"}]} | 3,583 | 151 |
gh_patches_debug_3129 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-3523 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Demo is broken for 0.1.6
## Description
<!-- A clear and concise description of what the bug is. -->
The demo can't be deployed with the current `master` branch (0.1.6)
The reasons are:
- We didn't add the fix to the demo settings in #3499 (just to prod), and
- We have an extra dependency in the demo setup which we need to build in but the current `Dockerfile` doesn't support that.
## Expected behavior
<!-- A clear and concise description of what you expected to happen. -->
We should be able to demo version 0.1.6 on our demo site.
</issue>
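For the settings half, the production fix presumably just needs to be mirrored into `demo/settings.py`; in Django that is a single proxy-header line (whether this matches #3499 exactly is an assumption):

```python
# Trust the X-Forwarded-Proto header set by the reverse proxy, so Django
# treats the original request as HTTPS when building absolute URLs.
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
```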
<code>
[start of demo/settings.py]
1 from config.settings.common_settings import * # noqa
2 from decouple import config as decouple_config
3
4 INSTALLED_APPS += [ # noqa
5 "demo",
6 "health_check",
7 ]
8
9 MIDDLEWARE += [ # noqa
10 "demo.middleware.LiveDemoModeMiddleware",
11 ]
12
13 MATHESAR_MODE = 'PRODUCTION'
14 MATHESAR_LIVE_DEMO = True
15 MATHESAR_LIVE_DEMO_USERNAME = decouple_config('MATHESAR_LIVE_DEMO_USERNAME', default=None)
16 MATHESAR_LIVE_DEMO_PASSWORD = decouple_config('MATHESAR_LIVE_DEMO_PASSWORD', default=None)
17
18 MATHESAR_DEMO_TEMPLATE = 'mathesar_demo_template'
19 MATHESAR_DEMO_ARXIV_LOG_PATH = decouple_config(
20 'MATHESAR_DEMO_ARXIV_LOG_PATH',
21 default='/var/lib/mathesar/demo/arxiv_db_schema_log'
22 )
23 BASE_TEMPLATE_ADDITIONAL_SCRIPT_TEMPLATES += ['demo/analytics.html'] # noqa
24 ROOT_URLCONF = "demo.urls"
25
[end of demo/settings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/demo/settings.py b/demo/settings.py
--- a/demo/settings.py
+++ b/demo/settings.py
@@ -10,6 +10,9 @@
"demo.middleware.LiveDemoModeMiddleware",
]
+
+SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
+
MATHESAR_MODE = 'PRODUCTION'
MATHESAR_LIVE_DEMO = True
MATHESAR_LIVE_DEMO_USERNAME = decouple_config('MATHESAR_LIVE_DEMO_USERNAME', default=None)
| {"golden_diff": "diff --git a/demo/settings.py b/demo/settings.py\n--- a/demo/settings.py\n+++ b/demo/settings.py\n@@ -10,6 +10,9 @@\n \"demo.middleware.LiveDemoModeMiddleware\",\n ]\n \n+\n+SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\n+\n MATHESAR_MODE = 'PRODUCTION'\n MATHESAR_LIVE_DEMO = True\n MATHESAR_LIVE_DEMO_USERNAME = decouple_config('MATHESAR_LIVE_DEMO_USERNAME', default=None)\n", "issue": "Demo is broken for 0.1.6\n## Description\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\nThe demo can't be deployed with the current `master` branch (0.1.6)\r\n\r\nThe reasons are:\r\n\r\n- We didn't add the fix to the demo settings in #3499 (just to prod), and\r\n- We have an extra dependency in the demo setup which we need to build in but the current `Dockerfile` doesn't support that.\r\n\r\n## Expected behavior\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\nWe should be able to demo version 0.1.6 on our demo site.\n", "before_files": [{"content": "from config.settings.common_settings import * # noqa\nfrom decouple import config as decouple_config\n\nINSTALLED_APPS += [ # noqa\n \"demo\",\n \"health_check\",\n]\n\nMIDDLEWARE += [ # noqa\n \"demo.middleware.LiveDemoModeMiddleware\",\n]\n\nMATHESAR_MODE = 'PRODUCTION'\nMATHESAR_LIVE_DEMO = True\nMATHESAR_LIVE_DEMO_USERNAME = decouple_config('MATHESAR_LIVE_DEMO_USERNAME', default=None)\nMATHESAR_LIVE_DEMO_PASSWORD = decouple_config('MATHESAR_LIVE_DEMO_PASSWORD', default=None)\n\nMATHESAR_DEMO_TEMPLATE = 'mathesar_demo_template'\nMATHESAR_DEMO_ARXIV_LOG_PATH = decouple_config(\n 'MATHESAR_DEMO_ARXIV_LOG_PATH',\n default='/var/lib/mathesar/demo/arxiv_db_schema_log'\n)\nBASE_TEMPLATE_ADDITIONAL_SCRIPT_TEMPLATES += ['demo/analytics.html'] # noqa\nROOT_URLCONF = \"demo.urls\"\n", "path": "demo/settings.py"}]} | 935 | 114 |
gh_patches_debug_18647 | rasdani/github-patches | git_diff | internetarchive__openlibrary-4075 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add "noindex" meta tag to people pages less than 30 days old
### Describe the problem that you'd like solved
<!-- A clear and concise description of what you want to happen. -->
Thousands of spam pages have been added to OL over time. In many instances, people and list pages are where the spam shows up. Spam can harm user trust in OL, affect OL's ranking in search engines, and potentially point to malicious sites and thus harm our users.
### Proposal & Constraints
<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->
In this week's community call, the decision was made to add the ["noindex" meta tag](https://en.wikipedia.org/wiki/Noindex) to people (and I think list) pages belonging to accounts less than 30 days old. This will result in those pages not being indexed by search engines that honor that meta tag.
<!-- Which suggestions or requirements should be considered for how feature needs to appear or be implemented? -->
### Additional context
<!-- Add any other context or screenshots about the feature request here. -->
### Stakeholders
<!-- @ tag stakeholders of this bug -->
@mekarpeles @cdrini
</issue>
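The tag itself is plain HTML (`<meta name="robots" content="noindex">` in the page head); what the templates need from Python is a way to tell whether the owning account is younger than 30 days. A minimal sketch of such a check, with illustrative names and assuming the account's creation datetime is available:

```python
from datetime import datetime

NOINDEX_ACCOUNT_AGE_DAYS = 30  # threshold agreed on in the community call


def needs_noindex(account_created: datetime, now: datetime = None) -> bool:
    """True when the owning account is new enough that its person/list pages
    should carry a robots "noindex" meta tag."""
    now = now or datetime.now()
    return (now - account_created).days < NOINDEX_ACCOUNT_AGE_DAYS
```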
<code>
[start of openlibrary/core/helpers.py]
1 """Generic helper functions to use in the templates and the webapp.
2 """
3 import web
4 from datetime import datetime
5 import simplejson
6 import re
7
8 from six.moves.urllib.parse import urlsplit
9
10 import babel
11 import babel.core
12 import babel.dates
13 import babel.numbers
14
15 try:
16 import genshi
17 import genshi.filters
18 except ImportError:
19 genshi = None
20
21 try:
22 from bs4 import BeautifulSoup
23 except ImportError:
24 BeautifulSoup = None
25
26 import six
27
28 from infogami import config
29
30 # handy utility to parse ISO date strings
31 from infogami.infobase.utils import parse_datetime
32 from infogami.utils.view import safeint
33
34 # TODO: i18n should be moved to core or infogami
35 from openlibrary.i18n import gettext as _ # noqa: F401
36
37 __all__ = [
38 "sanitize",
39 "json_encode",
40 "safesort",
41 "datestr", "format_date",
42 "sprintf", "cond", "commify", "truncate", "datetimestr_utc",
43 "urlsafe", "texsafe",
44 "percentage", "affiliate_id", "bookreader_host",
45 "private_collections", "private_collection_in",
46
47 # functions imported from elsewhere
48 "parse_datetime", "safeint"
49 ]
50 __docformat__ = "restructuredtext en"
51
52 def sanitize(html, encoding='utf8'):
53 """Removes unsafe tags and attributes from html and adds
54 ``rel="nofollow"`` attribute to all external links.
55 Using encoding=None if passing unicode strings e.g. for Python 3.
56 encoding="utf8" matches default format for earlier versions of Genshi
57 https://genshi.readthedocs.io/en/latest/upgrade/#upgrading-from-genshi-0-6-x-to-the-development-version
58 """
59
60 # Can't sanitize unless genshi module is available
61 if genshi is None:
62 return html
63
64 def get_nofollow(name, event):
65 attrs = event[1][1]
66 href = attrs.get('href', '')
67
68 if href:
69 # add rel=nofollow to all absolute links
70 _, host, _, _, _ = urlsplit(href)
71 if host:
72 return 'nofollow'
73
74 try:
75 html = genshi.HTML(html, encoding=encoding)
76
77 # except (genshi.ParseError, UnicodeDecodeError, UnicodeError) as e:
78 # don't catch Unicode errors so we can tell if we're getting bytes
79 except genshi.ParseError:
80 if BeautifulSoup:
81 # Bad html. Tidy it up using BeautifulSoup
82 html = str(BeautifulSoup(html, "lxml"))
83 try:
84 html = genshi.HTML(html)
85 except Exception:
86 # Failed to sanitize.
87 # We can't do any better than returning the original HTML, without sanitizing.
88 return html
89 else:
90 raise
91
92 stream = html \
93 | genshi.filters.HTMLSanitizer() \
94 | genshi.filters.Transformer("//a").attr("rel", get_nofollow)
95 return stream.render()
96
97
98 def json_encode(d, **kw):
99 """Same as simplejson.dumps.
100 """
101 return simplejson.dumps(d, **kw)
102
103
104 def safesort(iterable, key=None, reverse=False):
105 """Sorts heterogeneous of objects without raising errors.
106
107 Sorting heterogeneous objects sometimes causes error. For example,
108 datetime and Nones don't go well together. This function takes special
109 care to make that work.
110 """
111 key = key or (lambda x: x)
112 def safekey(x):
113 k = key(x)
114 return (k.__class__.__name__, k)
115 return sorted(iterable, key=safekey, reverse=reverse)
116
117 def datestr(then, now=None, lang=None, relative=True):
118 """Internationalized version of web.datestr."""
119 lang = lang or web.ctx.get('lang') or "en"
120 if relative:
121 if now is None:
122 now = datetime.now()
123 delta = then - now
124 if abs(delta.days) < 4: # Threshold from web.py
125 return babel.dates.format_timedelta(delta,
126 add_direction=True,
127 locale=_get_babel_locale(lang))
128 return format_date(then, lang=lang)
129
130
131 def datetimestr_utc(then):
132 return then.strftime("%Y-%m-%dT%H:%M:%SZ")
133
134 def format_date(date, lang=None):
135 lang = lang or web.ctx.get('lang') or "en"
136 locale = _get_babel_locale(lang)
137 return babel.dates.format_date(date, format="long", locale=locale)
138
139 def _get_babel_locale(lang):
140 try:
141 return babel.Locale(lang)
142 except babel.core.UnknownLocaleError:
143 return babel.Locale("en")
144
145
146 def sprintf(s, *a, **kw):
147 """Handy utility for string replacements.
148
149 >>> sprintf('hello %s', 'python')
150 'hello python'
151 >>> sprintf('hello %(name)s', name='python')
152 'hello python'
153 """
154 args = kw or a
155 if args:
156 return s % args
157 else:
158 return s
159
160
161 def cond(pred, true_value, false_value=""):
162 """Lisp style cond function.
163
 164     Handy to use instead of an if-else expression.
165 """
166 if pred:
167 return true_value
168 else:
169 return false_value
170
171
172 def commify(number, lang=None):
173 """localized version of web.commify"""
174 try:
175 lang = lang or web.ctx.get("lang") or "en"
176 return babel.numbers.format_number(int(number), lang)
177 except:
178 return six.text_type(number)
179
180
181 def truncate(text, limit):
182 """Truncate text and add ellipses if it longer than specified limit."""
183 if not text:
184 return ''
185 if len(text) <= limit:
186 return text
187 return text[:limit] + "..."
188
189
190 def urlsafe(path):
191 """Replaces the unsafe chars from path with underscores.
192 """
193 return _get_safepath_re().sub('_', path).strip('_')[:100]
194
195 @web.memoize
196 def _get_safepath_re():
197 """Make regular expression that matches all unsafe chars."""
198 # unsafe chars according to RFC 2396
199 reserved = ";/?:@&=+$,"
200 delims = '<>#%"'
201 unwise = "{}|\\^[]`"
202 space = ' \n\r'
203
204 unsafe = reserved + delims + unwise + space
205 pattern = '[%s]+' % "".join(re.escape(c) for c in unsafe)
206 return re.compile(pattern)
207
208
209 def get_coverstore_url():
210 """Returns the base url of coverstore by looking at the config."""
211 return config.get('coverstore_url', 'https://covers.openlibrary.org').rstrip('/')
212
213
214 _texsafe_map = {
215 '"': r'\textquotedbl{}',
216 '#': r'\#',
217 '$': r'\$',
218 '%': r'\%',
219 '&': r'\&',
220 '<': r'\textless{}',
221 '>': r'\textgreater{}',
222 '\\': r'\textbackslash{}',
223 '^': r'\^{}',
224 '_': r'\_{}',
225 '{': r'\{',
226 '}': r'\}',
227 '|': r'\textbar{}',
228 '~': r'\~{}',
229 }
230
231 _texsafe_re = None
232
233 def texsafe(text):
234 """Escapes the special characters in the given text for using it in tex type setting.
235
236 Tex (or Latex) uses some characters in the ascii character range for
237 special notations. These characters must be escaped when occur in the
238 regular text. This function escapes those special characters.
239
240 The list of special characters and the latex command to typeset them can
241 be found in `The Comprehensive LaTeX Symbol List`_.
242
243 .. _The Comprehensive LaTeX Symbol List: http://www.ctan.org/tex-archive/info/symbols/comprehensive/symbols-a4.pdf
244 """
245 global _texsafe_re
246 if _texsafe_re is None:
247 pattern = "[%s]" % re.escape("".join(list(_texsafe_map)))
248 _texsafe_re = re.compile(pattern)
249
250 return _texsafe_re.sub(lambda m: _texsafe_map[m.group(0)], text)
251
252 def percentage(value, total):
253 """Computes percentage.
254
255 >>> percentage(1, 10)
256 10.0
257 >>> percentage(0, 0)
258 0.0
259 """
260 return (value * 100.0) / total if total else 0.0
261
262 def uniq(values, key=None):
263 """Returns the unique entries from the given values in the original order.
264
265 The value of the optional `key` parameter should be a function that takes
266 a single argument and returns a key to test the uniqueness.
267 """
268 key = key or (lambda x: x)
269 s = set()
270 result = []
271 for v in values:
272 k = key(v)
273 if k not in s:
274 s.add(k)
275 result.append(v)
276 return result
277
278 def affiliate_id(affiliate):
279 return config.get('affiliate_ids', {}).get(affiliate, '')
280
281 def bookreader_host():
282 return config.get('bookreader_host', '')
283
284 def private_collections():
285 """Collections which are lendable but should not be linked from OL
286 TODO: Remove when we can handle institutional books"""
287 return ['georgetown-university-law-library-rr']
288
289 def private_collection_in(collections):
290 return any(x in private_collections() for x in collections)
291
292 def _get_helpers():
293 _globals = globals()
294 return web.storage((k, _globals[k]) for k in __all__)
295
296
297 ## This must be at the end of this module
298 helpers = _get_helpers()
299
[end of openlibrary/core/helpers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/openlibrary/core/helpers.py b/openlibrary/core/helpers.py
--- a/openlibrary/core/helpers.py
+++ b/openlibrary/core/helpers.py
@@ -35,7 +35,7 @@
"sanitize",
"json_encode",
"safesort",
- "datestr", "format_date",
+ "days_since", "datestr", "format_date",
"sprintf", "cond", "commify", "truncate", "datetimestr_utc",
"urlsafe", "texsafe",
"percentage", "affiliate_id", "bookreader_host",
@@ -111,6 +111,12 @@
return (k.__class__.__name__, k)
return sorted(iterable, key=safekey, reverse=reverse)
+
+def days_since(then, now=None):
+ delta = then - (now or datetime.now())
+ return abs(delta.days)
+
+
def datestr(then, now=None, lang=None, relative=True):
"""Internationalized version of web.datestr."""
lang = lang or web.ctx.get('lang') or "en"
| {"golden_diff": "diff --git a/openlibrary/core/helpers.py b/openlibrary/core/helpers.py\n--- a/openlibrary/core/helpers.py\n+++ b/openlibrary/core/helpers.py\n@@ -35,7 +35,7 @@\n \"sanitize\",\n \"json_encode\",\n \"safesort\",\n- \"datestr\", \"format_date\",\n+ \"days_since\", \"datestr\", \"format_date\",\n \"sprintf\", \"cond\", \"commify\", \"truncate\", \"datetimestr_utc\",\n \"urlsafe\", \"texsafe\",\n \"percentage\", \"affiliate_id\", \"bookreader_host\",\n@@ -111,6 +111,12 @@\n return (k.__class__.__name__, k)\n return sorted(iterable, key=safekey, reverse=reverse)\n \n+\n+def days_since(then, now=None):\n+ delta = then - (now or datetime.now())\n+ return abs(delta.days)\n+\n+\n def datestr(then, now=None, lang=None, relative=True):\n \"\"\"Internationalized version of web.datestr.\"\"\"\n lang = lang or web.ctx.get('lang') or \"en\"\n", "issue": "Add \"noindex\" meta tag to people pages less than 30 days old\n### Describe the problem that you'd like solved\r\n<!-- A clear and concise description of what you want to happen. -->\r\nThousands of spam pages have been added to OL over time. In many instances, people and list pages are where the spam shows up. Spam can harm user trust in OL, affect OL's ranking in search engines, and potentially point to malicious sites and thus harm our users.\r\n\r\n### Proposal & Constraints\r\n<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->\r\nIn this week's community call, the decision was made to add the [\"noindex\" meta tag](https://en.wikipedia.org/wiki/Noindex) to people (and I think list) pages belonging to accounts less than 30 days old. This will result in those pages not being indexed by search engines that honor that meta tag.\r\n\r\n<!-- Which suggestions or requirements should be considered for how feature needs to appear or be implemented? -->\r\n\r\n### Additional context\r\n<!-- Add any other context or screenshots about the feature request here. -->\r\n\r\n### Stakeholders\r\n<!-- @ tag stakeholders of this bug -->\r\n@mekarpeles @cdrini \r\n\r\n\n", "before_files": [{"content": "\"\"\"Generic helper functions to use in the templates and the webapp.\n\"\"\"\nimport web\nfrom datetime import datetime\nimport simplejson\nimport re\n\nfrom six.moves.urllib.parse import urlsplit\n\nimport babel\nimport babel.core\nimport babel.dates\nimport babel.numbers\n\ntry:\n import genshi\n import genshi.filters\nexcept ImportError:\n genshi = None\n\ntry:\n from bs4 import BeautifulSoup\nexcept ImportError:\n BeautifulSoup = None\n\nimport six\n\nfrom infogami import config\n\n# handy utility to parse ISO date strings\nfrom infogami.infobase.utils import parse_datetime\nfrom infogami.utils.view import safeint\n\n# TODO: i18n should be moved to core or infogami\nfrom openlibrary.i18n import gettext as _ # noqa: F401\n\n__all__ = [\n \"sanitize\",\n \"json_encode\",\n \"safesort\",\n \"datestr\", \"format_date\",\n \"sprintf\", \"cond\", \"commify\", \"truncate\", \"datetimestr_utc\",\n \"urlsafe\", \"texsafe\",\n \"percentage\", \"affiliate_id\", \"bookreader_host\",\n \"private_collections\", \"private_collection_in\",\n\n # functions imported from elsewhere\n \"parse_datetime\", \"safeint\"\n]\n__docformat__ = \"restructuredtext en\"\n\ndef sanitize(html, encoding='utf8'):\n \"\"\"Removes unsafe tags and attributes from html and adds\n ``rel=\"nofollow\"`` attribute to all external links.\n Using encoding=None if passing unicode strings e.g. 
for Python 3.\n encoding=\"utf8\" matches default format for earlier versions of Genshi\n https://genshi.readthedocs.io/en/latest/upgrade/#upgrading-from-genshi-0-6-x-to-the-development-version\n \"\"\"\n\n # Can't sanitize unless genshi module is available\n if genshi is None:\n return html\n\n def get_nofollow(name, event):\n attrs = event[1][1]\n href = attrs.get('href', '')\n\n if href:\n # add rel=nofollow to all absolute links\n _, host, _, _, _ = urlsplit(href)\n if host:\n return 'nofollow'\n\n try:\n html = genshi.HTML(html, encoding=encoding)\n\n # except (genshi.ParseError, UnicodeDecodeError, UnicodeError) as e:\n # don't catch Unicode errors so we can tell if we're getting bytes\n except genshi.ParseError:\n if BeautifulSoup:\n # Bad html. Tidy it up using BeautifulSoup\n html = str(BeautifulSoup(html, \"lxml\"))\n try:\n html = genshi.HTML(html)\n except Exception:\n # Failed to sanitize.\n # We can't do any better than returning the original HTML, without sanitizing.\n return html\n else:\n raise\n\n stream = html \\\n | genshi.filters.HTMLSanitizer() \\\n | genshi.filters.Transformer(\"//a\").attr(\"rel\", get_nofollow)\n return stream.render()\n\n\ndef json_encode(d, **kw):\n \"\"\"Same as simplejson.dumps.\n \"\"\"\n return simplejson.dumps(d, **kw)\n\n\ndef safesort(iterable, key=None, reverse=False):\n \"\"\"Sorts heterogeneous of objects without raising errors.\n\n Sorting heterogeneous objects sometimes causes error. For example,\n datetime and Nones don't go well together. This function takes special\n care to make that work.\n \"\"\"\n key = key or (lambda x: x)\n def safekey(x):\n k = key(x)\n return (k.__class__.__name__, k)\n return sorted(iterable, key=safekey, reverse=reverse)\n\ndef datestr(then, now=None, lang=None, relative=True):\n \"\"\"Internationalized version of web.datestr.\"\"\"\n lang = lang or web.ctx.get('lang') or \"en\"\n if relative:\n if now is None:\n now = datetime.now()\n delta = then - now\n if abs(delta.days) < 4: # Threshold from web.py\n return babel.dates.format_timedelta(delta,\n add_direction=True,\n locale=_get_babel_locale(lang))\n return format_date(then, lang=lang)\n\n\ndef datetimestr_utc(then):\n return then.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n\ndef format_date(date, lang=None):\n lang = lang or web.ctx.get('lang') or \"en\"\n locale = _get_babel_locale(lang)\n return babel.dates.format_date(date, format=\"long\", locale=locale)\n\ndef _get_babel_locale(lang):\n try:\n return babel.Locale(lang)\n except babel.core.UnknownLocaleError:\n return babel.Locale(\"en\")\n\n\ndef sprintf(s, *a, **kw):\n \"\"\"Handy utility for string replacements.\n\n >>> sprintf('hello %s', 'python')\n 'hello python'\n >>> sprintf('hello %(name)s', name='python')\n 'hello python'\n \"\"\"\n args = kw or a\n if args:\n return s % args\n else:\n return s\n\n\ndef cond(pred, true_value, false_value=\"\"):\n \"\"\"Lisp style cond function.\n\n Hanly to use instead of if-else expression.\n \"\"\"\n if pred:\n return true_value\n else:\n return false_value\n\n\ndef commify(number, lang=None):\n \"\"\"localized version of web.commify\"\"\"\n try:\n lang = lang or web.ctx.get(\"lang\") or \"en\"\n return babel.numbers.format_number(int(number), lang)\n except:\n return six.text_type(number)\n\n\ndef truncate(text, limit):\n \"\"\"Truncate text and add ellipses if it longer than specified limit.\"\"\"\n if not text:\n return ''\n if len(text) <= limit:\n return text\n return text[:limit] + \"...\"\n\n\ndef urlsafe(path):\n \"\"\"Replaces the unsafe chars from 
path with underscores.\n \"\"\"\n return _get_safepath_re().sub('_', path).strip('_')[:100]\n\[email protected]\ndef _get_safepath_re():\n \"\"\"Make regular expression that matches all unsafe chars.\"\"\"\n # unsafe chars according to RFC 2396\n reserved = \";/?:@&=+$,\"\n delims = '<>#%\"'\n unwise = \"{}|\\\\^[]`\"\n space = ' \\n\\r'\n\n unsafe = reserved + delims + unwise + space\n pattern = '[%s]+' % \"\".join(re.escape(c) for c in unsafe)\n return re.compile(pattern)\n\n\ndef get_coverstore_url():\n \"\"\"Returns the base url of coverstore by looking at the config.\"\"\"\n return config.get('coverstore_url', 'https://covers.openlibrary.org').rstrip('/')\n\n\n_texsafe_map = {\n '\"': r'\\textquotedbl{}',\n '#': r'\\#',\n '$': r'\\$',\n '%': r'\\%',\n '&': r'\\&',\n '<': r'\\textless{}',\n '>': r'\\textgreater{}',\n '\\\\': r'\\textbackslash{}',\n '^': r'\\^{}',\n '_': r'\\_{}',\n '{': r'\\{',\n '}': r'\\}',\n '|': r'\\textbar{}',\n '~': r'\\~{}',\n}\n\n_texsafe_re = None\n\ndef texsafe(text):\n \"\"\"Escapes the special characters in the given text for using it in tex type setting.\n\n Tex (or Latex) uses some characters in the ascii character range for\n special notations. These characters must be escaped when occur in the\n regular text. This function escapes those special characters.\n\n The list of special characters and the latex command to typeset them can\n be found in `The Comprehensive LaTeX Symbol List`_.\n\n .. _The Comprehensive LaTeX Symbol List: http://www.ctan.org/tex-archive/info/symbols/comprehensive/symbols-a4.pdf\n \"\"\"\n global _texsafe_re\n if _texsafe_re is None:\n pattern = \"[%s]\" % re.escape(\"\".join(list(_texsafe_map)))\n _texsafe_re = re.compile(pattern)\n\n return _texsafe_re.sub(lambda m: _texsafe_map[m.group(0)], text)\n\ndef percentage(value, total):\n \"\"\"Computes percentage.\n\n >>> percentage(1, 10)\n 10.0\n >>> percentage(0, 0)\n 0.0\n \"\"\"\n return (value * 100.0) / total if total else 0.0\n\ndef uniq(values, key=None):\n \"\"\"Returns the unique entries from the given values in the original order.\n\n The value of the optional `key` parameter should be a function that takes\n a single argument and returns a key to test the uniqueness.\n \"\"\"\n key = key or (lambda x: x)\n s = set()\n result = []\n for v in values:\n k = key(v)\n if k not in s:\n s.add(k)\n result.append(v)\n return result\n\ndef affiliate_id(affiliate):\n return config.get('affiliate_ids', {}).get(affiliate, '')\n\ndef bookreader_host():\n return config.get('bookreader_host', '')\n\ndef private_collections():\n \"\"\"Collections which are lendable but should not be linked from OL\n TODO: Remove when we can handle institutional books\"\"\"\n return ['georgetown-university-law-library-rr']\n\ndef private_collection_in(collections):\n return any(x in private_collections() for x in collections)\n\ndef _get_helpers():\n _globals = globals()\n return web.storage((k, _globals[k]) for k in __all__)\n\n\n## This must be at the end of this module\nhelpers = _get_helpers()\n", "path": "openlibrary/core/helpers.py"}]} | 3,713 | 241 |
gh_patches_debug_28726 | rasdani/github-patches | git_diff | networkx__networkx-7388 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
3.2.1: pytest fails in networkx/drawing/tests/test_pylab.py with `module 'matplotlib' has no attribute 'use'`
matplotlib 3.8.3 and pytest fails on scanning units with
```console
+ PYTHONPATH=/home/tkloczko/rpmbuild/BUILDROOT/python-networkx-3.2.1-5.fc36.x86_64/usr/lib64/python3.9/site-packages:/home/tkloczko/rpmbuild/BUILDROOT/python-networkx-3.2.1-5.fc36.x86_64/usr/lib/python3.9/site-packages
+ /usr/bin/pytest -ra -m 'not network'
============================= test session starts ==============================
platform linux -- Python 3.9.18, pytest-8.1.1, pluggy-1.4.0
rootdir: /home/tkloczko/rpmbuild/BUILD/networkx-networkx-3.2.1
configfile: pyproject.toml
collected 4831 items / 1 error / 30 skipped
==================================== ERRORS ====================================
____________ ERROR collecting networkx/drawing/tests/test_pylab.py _____________
networkx/drawing/tests/test_pylab.py:10: in <module>
mpl.use("PS")
E AttributeError: module 'matplotlib' has no attribute 'use'
=============================== warnings summary ===============================
networkx/utils/backends.py:135
/home/tkloczko/rpmbuild/BUILD/networkx-networkx-3.2.1/networkx/utils/backends.py:135: RuntimeWarning: networkx backend defined more than once: nx-loopback
backends.update(_get_backends("networkx.backends"))
networkx/utils/backends.py:576
/home/tkloczko/rpmbuild/BUILD/networkx-networkx-3.2.1/networkx/utils/backends.py:576: DeprecationWarning:
random_tree is deprecated and will be removed in NX v3.4
Use random_labeled_tree instead.
return self.orig_func(*args, **kwargs)
-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html
```
</issue>
<code>
[start of networkx/conftest.py]
1 """
2 Testing
3 =======
4
5 General guidelines for writing good tests:
6
7 - doctests always assume ``import networkx as nx`` so don't add that
8 - prefer pytest fixtures over classes with setup methods.
9 - use the ``@pytest.mark.parametrize`` decorator
10 - use ``pytest.importorskip`` for numpy, scipy, pandas, and matplotlib b/c of PyPy.
11 and add the module to the relevant entries below.
12
13 """
14 import os
15 import sys
16 import warnings
17 from importlib.metadata import entry_points
18
19 import pytest
20
21 import networkx
22
23
24 def pytest_addoption(parser):
25 parser.addoption(
26 "--runslow", action="store_true", default=False, help="run slow tests"
27 )
28 parser.addoption(
29 "--backend",
30 action="store",
31 default=None,
32 help="Run tests with a backend by auto-converting nx graphs to backend graphs",
33 )
34 parser.addoption(
35 "--fallback-to-nx",
36 action="store_true",
37 default=False,
38 help="Run nx function if a backend doesn't implement a dispatchable function"
39 " (use with --backend)",
40 )
41
42
43 def pytest_configure(config):
44 config.addinivalue_line("markers", "slow: mark test as slow to run")
45 backend = config.getoption("--backend")
46 if backend is None:
47 backend = os.environ.get("NETWORKX_TEST_BACKEND")
48 # nx-loopback backend is only available when testing
49 backends = entry_points(name="nx-loopback", group="networkx.backends")
50 if backends:
51 networkx.utils.backends.backends["nx-loopback"] = next(iter(backends))
52 else:
53 warnings.warn(
54 "\n\n WARNING: Mixed NetworkX configuration! \n\n"
55 " This environment has mixed configuration for networkx.\n"
56 " The test object nx-loopback is not configured correctly.\n"
57 " You should not be seeing this message.\n"
58 " Try `pip install -e .`, or change your PYTHONPATH\n"
59 " Make sure python finds the networkx repo you are testing\n\n"
60 )
61 if backend:
62 networkx.config["backend_priority"] = [backend]
63 fallback_to_nx = config.getoption("--fallback-to-nx")
64 if not fallback_to_nx:
65 fallback_to_nx = os.environ.get("NETWORKX_FALLBACK_TO_NX")
66 networkx.utils.backends._dispatchable._fallback_to_nx = bool(fallback_to_nx)
67
68
69 def pytest_collection_modifyitems(config, items):
70 # Setting this to True here allows tests to be set up before dispatching
71 # any function call to a backend.
72 networkx.utils.backends._dispatchable._is_testing = True
73 if backend_priority := networkx.config["backend_priority"]:
74 # Allow pluggable backends to add markers to tests (such as skip or xfail)
75 # when running in auto-conversion test mode
76 backend = networkx.utils.backends.backends[backend_priority[0]].load()
77 if hasattr(backend, "on_start_tests"):
78 getattr(backend, "on_start_tests")(items)
79
80 if config.getoption("--runslow"):
81 # --runslow given in cli: do not skip slow tests
82 return
83 skip_slow = pytest.mark.skip(reason="need --runslow option to run")
84 for item in items:
85 if "slow" in item.keywords:
86 item.add_marker(skip_slow)
87
88
89 # TODO: The warnings below need to be dealt with, but for now we silence them.
90 @pytest.fixture(autouse=True)
91 def set_warnings():
92 warnings.filterwarnings(
93 "ignore",
94 category=FutureWarning,
95 message="\n\nsingle_target_shortest_path_length",
96 )
97 warnings.filterwarnings(
98 "ignore",
99 category=FutureWarning,
100 message="\n\nshortest_path",
101 )
102 warnings.filterwarnings(
103 "ignore", category=DeprecationWarning, message="\nforest_str is deprecated"
104 )
105 warnings.filterwarnings(
106 "ignore", category=DeprecationWarning, message="\n\nrandom_tree"
107 )
108 warnings.filterwarnings(
109 "ignore", category=DeprecationWarning, message="Edmonds has been deprecated"
110 )
111 warnings.filterwarnings(
112 "ignore",
113 category=DeprecationWarning,
114 message="MultiDiGraph_EdgeKey has been deprecated",
115 )
116 warnings.filterwarnings(
117 "ignore", category=DeprecationWarning, message="\n\nThe `normalized`"
118 )
119 warnings.filterwarnings(
120 "ignore",
121 category=DeprecationWarning,
122 message="The function `join` is deprecated",
123 )
124 warnings.filterwarnings(
125 "ignore",
126 category=DeprecationWarning,
127 message="\n\nstrongly_connected_components_recursive",
128 )
129 warnings.filterwarnings(
130 "ignore", category=DeprecationWarning, message="\n\nall_triplets"
131 )
132 warnings.filterwarnings(
133 "ignore", category=DeprecationWarning, message="\n\nrandom_triad"
134 )
135 warnings.filterwarnings(
136 "ignore", category=DeprecationWarning, message="minimal_d_separator"
137 )
138 warnings.filterwarnings(
139 "ignore", category=DeprecationWarning, message="d_separated"
140 )
141 warnings.filterwarnings("ignore", category=DeprecationWarning, message="\n\nk_core")
142 warnings.filterwarnings(
143 "ignore", category=DeprecationWarning, message="\n\nk_shell"
144 )
145 warnings.filterwarnings(
146 "ignore", category=DeprecationWarning, message="\n\nk_crust"
147 )
148 warnings.filterwarnings(
149 "ignore", category=DeprecationWarning, message="\n\nk_corona"
150 )
151 warnings.filterwarnings(
152 "ignore", category=DeprecationWarning, message="\n\ntotal_spanning_tree_weight"
153 )
154 warnings.filterwarnings(
155 "ignore", category=DeprecationWarning, message=r"\n\nThe 'create=matrix'"
156 )
157
158
159 @pytest.fixture(autouse=True)
160 def add_nx(doctest_namespace):
161 doctest_namespace["nx"] = networkx
162
163
164 # What dependencies are installed?
165
166 try:
167 import numpy
168
169 has_numpy = True
170 except ImportError:
171 has_numpy = False
172
173 try:
174 import scipy
175
176 has_scipy = True
177 except ImportError:
178 has_scipy = False
179
180 try:
181 import matplotlib
182
183 has_matplotlib = True
184 except ImportError:
185 has_matplotlib = False
186
187 try:
188 import pandas
189
190 has_pandas = True
191 except ImportError:
192 has_pandas = False
193
194 try:
195 import pygraphviz
196
197 has_pygraphviz = True
198 except ImportError:
199 has_pygraphviz = False
200
201 try:
202 import pydot
203
204 has_pydot = True
205 except ImportError:
206 has_pydot = False
207
208 try:
209 import sympy
210
211 has_sympy = True
212 except ImportError:
213 has_sympy = False
214
215
216 # List of files that pytest should ignore
217
218 collect_ignore = []
219
220 needs_numpy = [
221 "algorithms/approximation/traveling_salesman.py",
222 "algorithms/centrality/current_flow_closeness.py",
223 "algorithms/node_classification.py",
224 "algorithms/non_randomness.py",
225 "algorithms/shortest_paths/dense.py",
226 "algorithms/tree/mst.py",
227 "generators/expanders.py",
228 "linalg/bethehessianmatrix.py",
229 "linalg/laplacianmatrix.py",
230 "utils/misc.py",
231 "algorithms/centrality/laplacian.py",
232 ]
233 needs_scipy = [
234 "algorithms/approximation/traveling_salesman.py",
235 "algorithms/assortativity/correlation.py",
236 "algorithms/assortativity/mixing.py",
237 "algorithms/assortativity/pairs.py",
238 "algorithms/bipartite/matrix.py",
239 "algorithms/bipartite/spectral.py",
240 "algorithms/centrality/current_flow_betweenness.py",
241 "algorithms/centrality/current_flow_betweenness_subset.py",
242 "algorithms/centrality/eigenvector.py",
243 "algorithms/centrality/katz.py",
244 "algorithms/centrality/second_order.py",
245 "algorithms/centrality/subgraph_alg.py",
246 "algorithms/communicability_alg.py",
247 "algorithms/link_analysis/hits_alg.py",
248 "algorithms/link_analysis/pagerank_alg.py",
249 "algorithms/node_classification.py",
250 "algorithms/similarity.py",
251 "convert_matrix.py",
252 "drawing/layout.py",
253 "generators/spectral_graph_forge.py",
254 "generators/expanders.py",
255 "linalg/algebraicconnectivity.py",
256 "linalg/attrmatrix.py",
257 "linalg/bethehessianmatrix.py",
258 "linalg/graphmatrix.py",
259 "linalg/modularitymatrix.py",
260 "linalg/spectrum.py",
261 "utils/rcm.py",
262 "algorithms/centrality/laplacian.py",
263 ]
264 needs_matplotlib = ["drawing/nx_pylab.py"]
265 needs_pandas = ["convert_matrix.py"]
266 needs_pygraphviz = ["drawing/nx_agraph.py"]
267 needs_pydot = ["drawing/nx_pydot.py"]
268 needs_sympy = ["algorithms/polynomials.py"]
269
270 if not has_numpy:
271 collect_ignore += needs_numpy
272 if not has_scipy:
273 collect_ignore += needs_scipy
274 if not has_matplotlib:
275 collect_ignore += needs_matplotlib
276 if not has_pandas:
277 collect_ignore += needs_pandas
278 if not has_pygraphviz:
279 collect_ignore += needs_pygraphviz
280 if not has_pydot:
281 collect_ignore += needs_pydot
282 if not has_sympy:
283 collect_ignore += needs_sympy
284
[end of networkx/conftest.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/networkx/conftest.py b/networkx/conftest.py
--- a/networkx/conftest.py
+++ b/networkx/conftest.py
@@ -241,25 +241,31 @@
"algorithms/centrality/current_flow_betweenness_subset.py",
"algorithms/centrality/eigenvector.py",
"algorithms/centrality/katz.py",
+ "algorithms/centrality/laplacian.py",
"algorithms/centrality/second_order.py",
"algorithms/centrality/subgraph_alg.py",
"algorithms/communicability_alg.py",
+ "algorithms/community/divisive.py",
+ "algorithms/distance_measures.py",
"algorithms/link_analysis/hits_alg.py",
"algorithms/link_analysis/pagerank_alg.py",
"algorithms/node_classification.py",
"algorithms/similarity.py",
+ "algorithms/tree/mst.py",
+ "algorithms/walks.py",
"convert_matrix.py",
"drawing/layout.py",
+ "drawing/nx_pylab.py",
"generators/spectral_graph_forge.py",
"generators/expanders.py",
"linalg/algebraicconnectivity.py",
"linalg/attrmatrix.py",
"linalg/bethehessianmatrix.py",
"linalg/graphmatrix.py",
+ "linalg/laplacianmatrix.py",
"linalg/modularitymatrix.py",
"linalg/spectrum.py",
"utils/rcm.py",
- "algorithms/centrality/laplacian.py",
]
needs_matplotlib = ["drawing/nx_pylab.py"]
needs_pandas = ["convert_matrix.py"]
| {"golden_diff": "diff --git a/networkx/conftest.py b/networkx/conftest.py\n--- a/networkx/conftest.py\n+++ b/networkx/conftest.py\n@@ -241,25 +241,31 @@\n \"algorithms/centrality/current_flow_betweenness_subset.py\",\n \"algorithms/centrality/eigenvector.py\",\n \"algorithms/centrality/katz.py\",\n+ \"algorithms/centrality/laplacian.py\",\n \"algorithms/centrality/second_order.py\",\n \"algorithms/centrality/subgraph_alg.py\",\n \"algorithms/communicability_alg.py\",\n+ \"algorithms/community/divisive.py\",\n+ \"algorithms/distance_measures.py\",\n \"algorithms/link_analysis/hits_alg.py\",\n \"algorithms/link_analysis/pagerank_alg.py\",\n \"algorithms/node_classification.py\",\n \"algorithms/similarity.py\",\n+ \"algorithms/tree/mst.py\",\n+ \"algorithms/walks.py\",\n \"convert_matrix.py\",\n \"drawing/layout.py\",\n+ \"drawing/nx_pylab.py\",\n \"generators/spectral_graph_forge.py\",\n \"generators/expanders.py\",\n \"linalg/algebraicconnectivity.py\",\n \"linalg/attrmatrix.py\",\n \"linalg/bethehessianmatrix.py\",\n \"linalg/graphmatrix.py\",\n+ \"linalg/laplacianmatrix.py\",\n \"linalg/modularitymatrix.py\",\n \"linalg/spectrum.py\",\n \"utils/rcm.py\",\n- \"algorithms/centrality/laplacian.py\",\n ]\n needs_matplotlib = [\"drawing/nx_pylab.py\"]\n needs_pandas = [\"convert_matrix.py\"]\n", "issue": "3.2.1: pytest fails in networkx/drawing/tests/test_pylab.py with `module 'matplotlib' has no attribute 'use'`\nmatplotlib 3.8.3 and pytest fails on scanning units with\r\n```console\r\n+ PYTHONPATH=/home/tkloczko/rpmbuild/BUILDROOT/python-networkx-3.2.1-5.fc36.x86_64/usr/lib64/python3.9/site-packages:/home/tkloczko/rpmbuild/BUILDROOT/python-networkx-3.2.1-5.fc36.x86_64/usr/lib/python3.9/site-packages\r\n+ /usr/bin/pytest -ra -m 'not network'\r\n============================= test session starts ==============================\r\nplatform linux -- Python 3.9.18, pytest-8.1.1, pluggy-1.4.0\r\nrootdir: /home/tkloczko/rpmbuild/BUILD/networkx-networkx-3.2.1\r\nconfigfile: pyproject.toml\r\ncollected 4831 items / 1 error / 30 skipped\r\n\r\n==================================== ERRORS ====================================\r\n____________ ERROR collecting networkx/drawing/tests/test_pylab.py _____________\r\nnetworkx/drawing/tests/test_pylab.py:10: in <module>\r\n mpl.use(\"PS\")\r\nE AttributeError: module 'matplotlib' has no attribute 'use'\r\n=============================== warnings summary ===============================\r\nnetworkx/utils/backends.py:135\r\n /home/tkloczko/rpmbuild/BUILD/networkx-networkx-3.2.1/networkx/utils/backends.py:135: RuntimeWarning: networkx backend defined more than once: nx-loopback\r\n backends.update(_get_backends(\"networkx.backends\"))\r\n\r\nnetworkx/utils/backends.py:576\r\n /home/tkloczko/rpmbuild/BUILD/networkx-networkx-3.2.1/networkx/utils/backends.py:576: DeprecationWarning:\r\n\r\n random_tree is deprecated and will be removed in NX v3.4\r\n Use random_labeled_tree instead.\r\n return self.orig_func(*args, **kwargs)\r\n\r\n-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html\r\n```\n", "before_files": [{"content": "\"\"\"\nTesting\n=======\n\nGeneral guidelines for writing good tests:\n\n- doctests always assume ``import networkx as nx`` so don't add that\n- prefer pytest fixtures over classes with setup methods.\n- use the ``@pytest.mark.parametrize`` decorator\n- use ``pytest.importorskip`` for numpy, scipy, pandas, and matplotlib b/c of PyPy.\n and add the module to the relevant entries below.\n\n\"\"\"\nimport os\nimport sys\nimport 
warnings\nfrom importlib.metadata import entry_points\n\nimport pytest\n\nimport networkx\n\n\ndef pytest_addoption(parser):\n parser.addoption(\n \"--runslow\", action=\"store_true\", default=False, help=\"run slow tests\"\n )\n parser.addoption(\n \"--backend\",\n action=\"store\",\n default=None,\n help=\"Run tests with a backend by auto-converting nx graphs to backend graphs\",\n )\n parser.addoption(\n \"--fallback-to-nx\",\n action=\"store_true\",\n default=False,\n help=\"Run nx function if a backend doesn't implement a dispatchable function\"\n \" (use with --backend)\",\n )\n\n\ndef pytest_configure(config):\n config.addinivalue_line(\"markers\", \"slow: mark test as slow to run\")\n backend = config.getoption(\"--backend\")\n if backend is None:\n backend = os.environ.get(\"NETWORKX_TEST_BACKEND\")\n # nx-loopback backend is only available when testing\n backends = entry_points(name=\"nx-loopback\", group=\"networkx.backends\")\n if backends:\n networkx.utils.backends.backends[\"nx-loopback\"] = next(iter(backends))\n else:\n warnings.warn(\n \"\\n\\n WARNING: Mixed NetworkX configuration! \\n\\n\"\n \" This environment has mixed configuration for networkx.\\n\"\n \" The test object nx-loopback is not configured correctly.\\n\"\n \" You should not be seeing this message.\\n\"\n \" Try `pip install -e .`, or change your PYTHONPATH\\n\"\n \" Make sure python finds the networkx repo you are testing\\n\\n\"\n )\n if backend:\n networkx.config[\"backend_priority\"] = [backend]\n fallback_to_nx = config.getoption(\"--fallback-to-nx\")\n if not fallback_to_nx:\n fallback_to_nx = os.environ.get(\"NETWORKX_FALLBACK_TO_NX\")\n networkx.utils.backends._dispatchable._fallback_to_nx = bool(fallback_to_nx)\n\n\ndef pytest_collection_modifyitems(config, items):\n # Setting this to True here allows tests to be set up before dispatching\n # any function call to a backend.\n networkx.utils.backends._dispatchable._is_testing = True\n if backend_priority := networkx.config[\"backend_priority\"]:\n # Allow pluggable backends to add markers to tests (such as skip or xfail)\n # when running in auto-conversion test mode\n backend = networkx.utils.backends.backends[backend_priority[0]].load()\n if hasattr(backend, \"on_start_tests\"):\n getattr(backend, \"on_start_tests\")(items)\n\n if config.getoption(\"--runslow\"):\n # --runslow given in cli: do not skip slow tests\n return\n skip_slow = pytest.mark.skip(reason=\"need --runslow option to run\")\n for item in items:\n if \"slow\" in item.keywords:\n item.add_marker(skip_slow)\n\n\n# TODO: The warnings below need to be dealt with, but for now we silence them.\[email protected](autouse=True)\ndef set_warnings():\n warnings.filterwarnings(\n \"ignore\",\n category=FutureWarning,\n message=\"\\n\\nsingle_target_shortest_path_length\",\n )\n warnings.filterwarnings(\n \"ignore\",\n category=FutureWarning,\n message=\"\\n\\nshortest_path\",\n )\n warnings.filterwarnings(\n \"ignore\", category=DeprecationWarning, message=\"\\nforest_str is deprecated\"\n )\n warnings.filterwarnings(\n \"ignore\", category=DeprecationWarning, message=\"\\n\\nrandom_tree\"\n )\n warnings.filterwarnings(\n \"ignore\", category=DeprecationWarning, message=\"Edmonds has been deprecated\"\n )\n warnings.filterwarnings(\n \"ignore\",\n category=DeprecationWarning,\n message=\"MultiDiGraph_EdgeKey has been deprecated\",\n )\n warnings.filterwarnings(\n \"ignore\", category=DeprecationWarning, message=\"\\n\\nThe `normalized`\"\n )\n warnings.filterwarnings(\n \"ignore\",\n 
category=DeprecationWarning,\n message=\"The function `join` is deprecated\",\n )\n warnings.filterwarnings(\n \"ignore\",\n category=DeprecationWarning,\n message=\"\\n\\nstrongly_connected_components_recursive\",\n )\n warnings.filterwarnings(\n \"ignore\", category=DeprecationWarning, message=\"\\n\\nall_triplets\"\n )\n warnings.filterwarnings(\n \"ignore\", category=DeprecationWarning, message=\"\\n\\nrandom_triad\"\n )\n warnings.filterwarnings(\n \"ignore\", category=DeprecationWarning, message=\"minimal_d_separator\"\n )\n warnings.filterwarnings(\n \"ignore\", category=DeprecationWarning, message=\"d_separated\"\n )\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning, message=\"\\n\\nk_core\")\n warnings.filterwarnings(\n \"ignore\", category=DeprecationWarning, message=\"\\n\\nk_shell\"\n )\n warnings.filterwarnings(\n \"ignore\", category=DeprecationWarning, message=\"\\n\\nk_crust\"\n )\n warnings.filterwarnings(\n \"ignore\", category=DeprecationWarning, message=\"\\n\\nk_corona\"\n )\n warnings.filterwarnings(\n \"ignore\", category=DeprecationWarning, message=\"\\n\\ntotal_spanning_tree_weight\"\n )\n warnings.filterwarnings(\n \"ignore\", category=DeprecationWarning, message=r\"\\n\\nThe 'create=matrix'\"\n )\n\n\[email protected](autouse=True)\ndef add_nx(doctest_namespace):\n doctest_namespace[\"nx\"] = networkx\n\n\n# What dependencies are installed?\n\ntry:\n import numpy\n\n has_numpy = True\nexcept ImportError:\n has_numpy = False\n\ntry:\n import scipy\n\n has_scipy = True\nexcept ImportError:\n has_scipy = False\n\ntry:\n import matplotlib\n\n has_matplotlib = True\nexcept ImportError:\n has_matplotlib = False\n\ntry:\n import pandas\n\n has_pandas = True\nexcept ImportError:\n has_pandas = False\n\ntry:\n import pygraphviz\n\n has_pygraphviz = True\nexcept ImportError:\n has_pygraphviz = False\n\ntry:\n import pydot\n\n has_pydot = True\nexcept ImportError:\n has_pydot = False\n\ntry:\n import sympy\n\n has_sympy = True\nexcept ImportError:\n has_sympy = False\n\n\n# List of files that pytest should ignore\n\ncollect_ignore = []\n\nneeds_numpy = [\n \"algorithms/approximation/traveling_salesman.py\",\n \"algorithms/centrality/current_flow_closeness.py\",\n \"algorithms/node_classification.py\",\n \"algorithms/non_randomness.py\",\n \"algorithms/shortest_paths/dense.py\",\n \"algorithms/tree/mst.py\",\n \"generators/expanders.py\",\n \"linalg/bethehessianmatrix.py\",\n \"linalg/laplacianmatrix.py\",\n \"utils/misc.py\",\n \"algorithms/centrality/laplacian.py\",\n]\nneeds_scipy = [\n \"algorithms/approximation/traveling_salesman.py\",\n \"algorithms/assortativity/correlation.py\",\n \"algorithms/assortativity/mixing.py\",\n \"algorithms/assortativity/pairs.py\",\n \"algorithms/bipartite/matrix.py\",\n \"algorithms/bipartite/spectral.py\",\n \"algorithms/centrality/current_flow_betweenness.py\",\n \"algorithms/centrality/current_flow_betweenness_subset.py\",\n \"algorithms/centrality/eigenvector.py\",\n \"algorithms/centrality/katz.py\",\n \"algorithms/centrality/second_order.py\",\n \"algorithms/centrality/subgraph_alg.py\",\n \"algorithms/communicability_alg.py\",\n \"algorithms/link_analysis/hits_alg.py\",\n \"algorithms/link_analysis/pagerank_alg.py\",\n \"algorithms/node_classification.py\",\n \"algorithms/similarity.py\",\n \"convert_matrix.py\",\n \"drawing/layout.py\",\n \"generators/spectral_graph_forge.py\",\n \"generators/expanders.py\",\n \"linalg/algebraicconnectivity.py\",\n \"linalg/attrmatrix.py\",\n \"linalg/bethehessianmatrix.py\",\n 
\"linalg/graphmatrix.py\",\n \"linalg/modularitymatrix.py\",\n \"linalg/spectrum.py\",\n \"utils/rcm.py\",\n \"algorithms/centrality/laplacian.py\",\n]\nneeds_matplotlib = [\"drawing/nx_pylab.py\"]\nneeds_pandas = [\"convert_matrix.py\"]\nneeds_pygraphviz = [\"drawing/nx_agraph.py\"]\nneeds_pydot = [\"drawing/nx_pydot.py\"]\nneeds_sympy = [\"algorithms/polynomials.py\"]\n\nif not has_numpy:\n collect_ignore += needs_numpy\nif not has_scipy:\n collect_ignore += needs_scipy\nif not has_matplotlib:\n collect_ignore += needs_matplotlib\nif not has_pandas:\n collect_ignore += needs_pandas\nif not has_pygraphviz:\n collect_ignore += needs_pygraphviz\nif not has_pydot:\n collect_ignore += needs_pydot\nif not has_sympy:\n collect_ignore += needs_sympy\n", "path": "networkx/conftest.py"}]} | 3,884 | 382 |
gh_patches_debug_1885 | rasdani/github-patches | git_diff | ansible__ansible-17707 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
lookup properties
<!--- Verify first that your issue/request is not already reported in GitHub -->
##### ISSUE TYPE
<!--- Pick one below and delete the rest: -->
- Bug Report
##### ANSIBLE VERSION
<!--- Paste verbatim output from “ansible --version” between quotes below -->
```
ansible 2.2.0 (devel 4e369a31db) last updated 2016/07/02 15:01:01 (GMT +400)
lib/ansible/modules/core: (detached HEAD 1d0d5db97a) last updated 2016/07/02 15:01:12 (GMT +400)
lib/ansible/modules/extras: (detached HEAD 00b8b96906) last updated 2016/07/02 15:01:12 (GMT +400)
config file = /etc/ansible/ansible.cfg
configured module search path = Default w/o overrides
```
##### CONFIGURATION
##### OS / ENVIRONMENT
"N/A"
##### SUMMARY
[commit](https://github.com/ansible/ansible/commit/4ba60d00c8d7e62912a37ec24f90f6e5d0770c4d)
this commit breaks `lookup` for `type=properties`
##### STEPS TO REPRODUCE
just try to read some properties through `lookup`
```
- name: lookup
vars:
property_value: "{{ lookup('ini', 'some.java.property type=properties file=config.properties') }}"
debug: msg="{{ property_value }}"
```
##### EXPECTED RESULTS
read correct value from property file
##### ACTUAL RESULTS
```
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/ConfigParser.py", line 512, in _read
raise MissingSectionHeaderError(fpname, lineno, line)
MissingSectionHeaderError: File contains no section headers.
file: /config.properties, line: 3
'environment=dev\n'
fatal: [localhost]: FAILED! => {"failed": true, "msg": "Unexpected failure during module execution.", "stdout": ""}
NO MORE HOSTS LEFT *************************************************************
to retry, use: --limit @test.retry
PLAY RECAP *********************************************************************
localhost : ok=1 changed=0 unreachable=0 failed=1
```
By the way, it would be great to implement 'autofill' properties
```
host=127.0.0.1
api.host=${host}
```
cc @jctanner
</issue>
<code>
[start of lib/ansible/plugins/lookup/ini.py]
1 # (c) 2015, Yannig Perre <yannig.perre(at)gmail.com>
2 #
3 # This file is part of Ansible
4 #
5 # Ansible is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # Ansible is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
17 from __future__ import (absolute_import, division, print_function)
18 __metaclass__ = type
19
20 from io import StringIO
21 import os
22 import re
23
24 try:
25 # python2
26 import ConfigParser as configparser
27 except ImportError:
28 # python3
29 import configparser
30
31 from ansible.errors import AnsibleError
32 from ansible.plugins.lookup import LookupBase
33 from ansible.module_utils._text import to_bytes, to_text
34
35
36 def _parse_params(term):
37 '''Safely split parameter term to preserve spaces'''
38
39 keys = ['key', 'section', 'file', 're']
40 params = {}
41 for k in keys:
42 params[k] = ''
43
44 thiskey = 'key'
45 for idp,phrase in enumerate(term.split()):
46 for k in keys:
47 if ('%s=' % k) in phrase:
48 thiskey = k
49 if idp == 0 or not params[thiskey]:
50 params[thiskey] = phrase
51 else:
52 params[thiskey] += ' ' + phrase
53
54 rparams = [params[x] for x in keys if params[x]]
55 return rparams
56
57
58 class LookupModule(LookupBase):
59
60 def read_properties(self, filename, key, dflt, is_regexp):
61 config = StringIO()
62 current_cfg_file = open(to_bytes(filename, errors='surrogate_or_strict'), 'rb')
63
64 config.write(u'[java_properties]\n' + to_text(current_cfg_file.read(), errors='surrogate_or_strict'))
65 config.seek(0, os.SEEK_SET)
66 self.cp.readfp(config)
67 return self.get_value(key, 'java_properties', dflt, is_regexp)
68
69 def read_ini(self, filename, key, section, dflt, is_regexp):
70 self.cp.readfp(open(to_bytes(filename, errors='surrogate_or_strict')))
71 return self.get_value(key, section, dflt, is_regexp)
72
73 def get_value(self, key, section, dflt, is_regexp):
74 # Retrieve all values from a section using a regexp
75 if is_regexp:
76 return [v for k, v in self.cp.items(section) if re.match(key, k)]
77 value = None
78 # Retrieve a single value
79 try:
80 value = self.cp.get(section, key)
81 except configparser.NoOptionError:
82 return dflt
83 return value
84
85 def run(self, terms, variables=None, **kwargs):
86
87 basedir = self.get_basedir(variables)
88 self.basedir = basedir
89 self.cp = configparser.ConfigParser()
90
91 ret = []
92 for term in terms:
93 params = _parse_params(term)
94 key = params[0]
95
96 paramvals = {
97 'file' : 'ansible.ini',
98 're' : False,
99 'default' : None,
100 'section' : "global",
101 'type' : "ini",
102 }
103
104 # parameters specified?
105 try:
106 for param in params[1:]:
107 name, value = param.split('=')
108 assert(name in paramvals)
109 paramvals[name] = value
110 except (ValueError, AssertionError) as e:
111 raise AnsibleError(e)
112
113 path = self.find_file_in_search_path(variables, 'files', paramvals['file'])
114 if paramvals['type'] == "properties":
115 var = self.read_properties(path, key, paramvals['default'], paramvals['re'])
116 else:
117 var = self.read_ini(path, key, paramvals['section'], paramvals['default'], paramvals['re'])
118 if var is not None:
119 if type(var) is list:
120 for v in var:
121 ret.append(v)
122 else:
123 ret.append(var)
124 return ret
125
[end of lib/ansible/plugins/lookup/ini.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lib/ansible/plugins/lookup/ini.py b/lib/ansible/plugins/lookup/ini.py
--- a/lib/ansible/plugins/lookup/ini.py
+++ b/lib/ansible/plugins/lookup/ini.py
@@ -36,7 +36,7 @@
def _parse_params(term):
'''Safely split parameter term to preserve spaces'''
- keys = ['key', 'section', 'file', 're']
+ keys = ['key', 'type', 'section', 'file', 're']
params = {}
for k in keys:
params[k] = ''
| {"golden_diff": "diff --git a/lib/ansible/plugins/lookup/ini.py b/lib/ansible/plugins/lookup/ini.py\n--- a/lib/ansible/plugins/lookup/ini.py\n+++ b/lib/ansible/plugins/lookup/ini.py\n@@ -36,7 +36,7 @@\n def _parse_params(term):\n '''Safely split parameter term to preserve spaces'''\n \n- keys = ['key', 'section', 'file', 're']\n+ keys = ['key', 'type', 'section', 'file', 're']\n params = {}\n for k in keys:\n params[k] = ''\n", "issue": "lookup properties\n<!--- Verify first that your issue/request is not already reported in GitHub -->\n##### ISSUE TYPE\n\n<!--- Pick one below and delete the rest: -->\n- Bug Report\n##### ANSIBLE VERSION\n\n<!--- Paste verbatim output from \u201cansible --version\u201d between quotes below -->\n\n```\nansible 2.2.0 (devel 4e369a31db) last updated 2016/07/02 15:01:01 (GMT +400)\n lib/ansible/modules/core: (detached HEAD 1d0d5db97a) last updated 2016/07/02 15:01:12 (GMT +400)\n lib/ansible/modules/extras: (detached HEAD 00b8b96906) last updated 2016/07/02 15:01:12 (GMT +400)\n config file = /etc/ansible/ansible.cfg\n configured module search path = Default w/o overrides\n```\n##### CONFIGURATION\n##### OS / ENVIRONMENT\n\n\"N/A\"\n##### SUMMARY\n\n[commit](https://github.com/ansible/ansible/commit/4ba60d00c8d7e62912a37ec24f90f6e5d0770c4d)\nthis commit breaks `lookup` for `type=properties`\n##### STEPS TO REPRODUCE\n\njust try to read some properties through `lookup`\n\n```\n- name: lookup\n vars:\n property_value: \"{{ lookup('ini', 'some.java.property type=properties file=config.properties') }}\"\n debug: msg=\"{{ property_value }}\"\n```\n##### EXPECTED RESULTS\n\nread correct value from property file\n##### ACTUAL RESULTS\n\n```\n File \"/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/ConfigParser.py\", line 512, in _read\n raise MissingSectionHeaderError(fpname, lineno, line)\nMissingSectionHeaderError: File contains no section headers.\nfile: /config.properties, line: 3\n'environment=dev\\n'\n\nfatal: [localhost]: FAILED! => {\"failed\": true, \"msg\": \"Unexpected failure during module execution.\", \"stdout\": \"\"}\n\nNO MORE HOSTS LEFT *************************************************************\n to retry, use: --limit @test.retry\n\nPLAY RECAP *********************************************************************\nlocalhost : ok=1 changed=0 unreachable=0 failed=1\n```\n\nBy the way, it would be great to implement 'autofill' properties\n\n```\nhost=127.0.0.1\napi.host=${host}\n```\n\ncc @jctanner \n\n", "before_files": [{"content": "# (c) 2015, Yannig Perre <yannig.perre(at)gmail.com>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. 
If not, see <http://www.gnu.org/licenses/>.\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nfrom io import StringIO\nimport os\nimport re\n\ntry:\n # python2\n import ConfigParser as configparser\nexcept ImportError:\n # python3\n import configparser\n\nfrom ansible.errors import AnsibleError\nfrom ansible.plugins.lookup import LookupBase\nfrom ansible.module_utils._text import to_bytes, to_text\n\n\ndef _parse_params(term):\n '''Safely split parameter term to preserve spaces'''\n\n keys = ['key', 'section', 'file', 're']\n params = {}\n for k in keys:\n params[k] = ''\n\n thiskey = 'key'\n for idp,phrase in enumerate(term.split()):\n for k in keys:\n if ('%s=' % k) in phrase:\n thiskey = k\n if idp == 0 or not params[thiskey]:\n params[thiskey] = phrase\n else:\n params[thiskey] += ' ' + phrase\n\n rparams = [params[x] for x in keys if params[x]]\n return rparams\n\n\nclass LookupModule(LookupBase):\n\n def read_properties(self, filename, key, dflt, is_regexp):\n config = StringIO()\n current_cfg_file = open(to_bytes(filename, errors='surrogate_or_strict'), 'rb')\n\n config.write(u'[java_properties]\\n' + to_text(current_cfg_file.read(), errors='surrogate_or_strict'))\n config.seek(0, os.SEEK_SET)\n self.cp.readfp(config)\n return self.get_value(key, 'java_properties', dflt, is_regexp)\n\n def read_ini(self, filename, key, section, dflt, is_regexp):\n self.cp.readfp(open(to_bytes(filename, errors='surrogate_or_strict')))\n return self.get_value(key, section, dflt, is_regexp)\n\n def get_value(self, key, section, dflt, is_regexp):\n # Retrieve all values from a section using a regexp\n if is_regexp:\n return [v for k, v in self.cp.items(section) if re.match(key, k)]\n value = None\n # Retrieve a single value\n try:\n value = self.cp.get(section, key)\n except configparser.NoOptionError:\n return dflt\n return value\n\n def run(self, terms, variables=None, **kwargs):\n\n basedir = self.get_basedir(variables)\n self.basedir = basedir\n self.cp = configparser.ConfigParser()\n\n ret = []\n for term in terms:\n params = _parse_params(term)\n key = params[0]\n\n paramvals = {\n 'file' : 'ansible.ini',\n 're' : False,\n 'default' : None,\n 'section' : \"global\",\n 'type' : \"ini\",\n }\n\n # parameters specified?\n try:\n for param in params[1:]:\n name, value = param.split('=')\n assert(name in paramvals)\n paramvals[name] = value\n except (ValueError, AssertionError) as e:\n raise AnsibleError(e)\n\n path = self.find_file_in_search_path(variables, 'files', paramvals['file'])\n if paramvals['type'] == \"properties\":\n var = self.read_properties(path, key, paramvals['default'], paramvals['re'])\n else:\n var = self.read_ini(path, key, paramvals['section'], paramvals['default'], paramvals['re'])\n if var is not None:\n if type(var) is list:\n for v in var:\n ret.append(v)\n else:\n ret.append(var)\n return ret\n", "path": "lib/ansible/plugins/lookup/ini.py"}]} | 2,394 | 130 |
gh_patches_debug_25790 | rasdani/github-patches | git_diff | pre-commit__pre-commit-244 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unstaged check should not complain when running --all-files
```
$ pre-commit run --all-files
[ERROR] Your .pre-commit-config.yaml is unstaged.
`git add .pre-commit-config.yaml` to fix this.
Run pre-commit with --allow-unstaged-config to silence this.
```
</issue>
<code>
[start of pre_commit/commands/run.py]
1 from __future__ import print_function
2 from __future__ import unicode_literals
3
4 import logging
5 import os
6 import sys
7
8 from pre_commit import color
9 from pre_commit import git
10 from pre_commit.logging_handler import LoggingHandler
11 from pre_commit.output import get_hook_message
12 from pre_commit.output import sys_stdout_write_wrapper
13 from pre_commit.staged_files_only import staged_files_only
14 from pre_commit.util import cmd_output
15 from pre_commit.util import noop_context
16
17
18 logger = logging.getLogger('pre_commit')
19
20
21 def _get_skips(environ):
22 skips = environ.get('SKIP', '')
23 return set(skip.strip() for skip in skips.split(',') if skip.strip())
24
25
26 def _hook_msg_start(hook, verbose):
27 return '{0}{1}'.format(
28 '[{0}] '.format(hook['id']) if verbose else '',
29 hook['name'],
30 )
31
32
33 def _print_no_files_skipped(hook, write, args):
34 write(get_hook_message(
35 _hook_msg_start(hook, args.verbose),
36 postfix='(no files to check) ',
37 end_msg='Skipped',
38 end_color=color.TURQUOISE,
39 use_color=args.color,
40 ))
41
42
43 def _print_user_skipped(hook, write, args):
44 write(get_hook_message(
45 _hook_msg_start(hook, args.verbose),
46 end_msg='Skipped',
47 end_color=color.YELLOW,
48 use_color=args.color,
49 ))
50
51
52 def get_changed_files(new, old):
53 return cmd_output(
54 'git', 'diff', '--name-only', '{0}..{1}'.format(old, new),
55 )[1].splitlines()
56
57
58 def get_filenames(args, include_expr, exclude_expr):
59 if args.origin and args.source:
60 getter = git.get_files_matching(
61 lambda: get_changed_files(args.origin, args.source),
62 )
63 elif args.files:
64 getter = git.get_files_matching(lambda: args.files)
65 elif args.all_files:
66 getter = git.get_all_files_matching
67 elif git.is_in_merge_conflict():
68 getter = git.get_conflicted_files_matching
69 else:
70 getter = git.get_staged_files_matching
71 return getter(include_expr, exclude_expr)
72
73
74 def _run_single_hook(hook, repo, args, write, skips=frozenset()):
75 filenames = get_filenames(args, hook['files'], hook['exclude'])
76 if hook['id'] in skips:
77 _print_user_skipped(hook, write, args)
78 return 0
79 elif not filenames:
80 _print_no_files_skipped(hook, write, args)
81 return 0
82
83 # Print the hook and the dots first in case the hook takes hella long to
84 # run.
85 write(get_hook_message(_hook_msg_start(hook, args.verbose), end_len=6))
86 sys.stdout.flush()
87
88 retcode, stdout, stderr = repo.run_hook(hook, filenames)
89
90 if retcode != hook['expected_return_value']:
91 retcode = 1
92 print_color = color.RED
93 pass_fail = 'Failed'
94 else:
95 retcode = 0
96 print_color = color.GREEN
97 pass_fail = 'Passed'
98
99 write(color.format_color(pass_fail, print_color, args.color) + '\n')
100
101 if (stdout or stderr) and (retcode or args.verbose):
102 write('hookid: {0}\n'.format(hook['id']))
103 write('\n')
104 for output in (stdout, stderr):
105 if output.strip():
106 write(output.strip() + '\n')
107 write('\n')
108
109 return retcode
110
111
112 def _run_hooks(repo_hooks, args, write, environ):
113 """Actually run the hooks."""
114 skips = _get_skips(environ)
115 retval = 0
116 for repo, hook in repo_hooks:
117 retval |= _run_single_hook(hook, repo, args, write, skips)
118 return retval
119
120
121 def get_repo_hooks(runner):
122 for repo in runner.repositories:
123 for _, hook in repo.hooks:
124 yield (repo, hook)
125
126
127 def _has_unmerged_paths(runner):
128 _, stdout, _ = runner.cmd_runner.run(['git', 'ls-files', '--unmerged'])
129 return bool(stdout.strip())
130
131
132 def _has_unstaged_config(runner):
133 retcode, _, _ = runner.cmd_runner.run(
134 ('git', 'diff', '--exit-code', runner.config_file_path),
135 retcode=None,
136 )
137 # be explicit, other git errors don't mean it has an unstaged config.
138 return retcode == 1
139
140
141 def run(runner, args, write=sys_stdout_write_wrapper, environ=os.environ):
142 # Set up our logging handler
143 logger.addHandler(LoggingHandler(args.color, write=write))
144 logger.setLevel(logging.INFO)
145
146 # Check if we have unresolved merge conflict files and fail fast.
147 if _has_unmerged_paths(runner):
148 logger.error('Unmerged files. Resolve before committing.')
149 return 1
150 if bool(args.source) != bool(args.origin):
151 logger.error('Specify both --origin and --source.')
152 return 1
153 if _has_unstaged_config(runner) and not args.no_stash:
154 if args.allow_unstaged_config:
155 logger.warn(
156 'You have an unstaged config file and have specified the '
157 '--allow-unstaged-config option.\n'
158 'Note that your config will be stashed before the config is '
159 'parsed unless --no-stash is specified.',
160 )
161 else:
162 logger.error(
163 'Your .pre-commit-config.yaml is unstaged.\n'
164 '`git add .pre-commit-config.yaml` to fix this.\n'
165 'Run pre-commit with --allow-unstaged-config to silence this.'
166 )
167 return 1
168
169 # Don't stash if specified or files are specified
170 if args.no_stash or args.all_files or args.files:
171 ctx = noop_context()
172 else:
173 ctx = staged_files_only(runner.cmd_runner)
174
175 with ctx:
176 repo_hooks = list(get_repo_hooks(runner))
177 if args.hook:
178 repo_hooks = [
179 (repo, hook) for repo, hook in repo_hooks
180 if hook['id'] == args.hook
181 ]
182 if not repo_hooks:
183 write('No hook with id `{0}`\n'.format(args.hook))
184 return 1
185 return _run_hooks(repo_hooks, args, write, environ)
186
[end of pre_commit/commands/run.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pre_commit/commands/run.py b/pre_commit/commands/run.py
--- a/pre_commit/commands/run.py
+++ b/pre_commit/commands/run.py
@@ -139,6 +139,7 @@
def run(runner, args, write=sys_stdout_write_wrapper, environ=os.environ):
+ no_stash = args.no_stash or args.all_files or bool(args.files)
# Set up our logging handler
logger.addHandler(LoggingHandler(args.color, write=write))
logger.setLevel(logging.INFO)
@@ -150,7 +151,7 @@
if bool(args.source) != bool(args.origin):
logger.error('Specify both --origin and --source.')
return 1
- if _has_unstaged_config(runner) and not args.no_stash:
+ if _has_unstaged_config(runner) and not no_stash:
if args.allow_unstaged_config:
logger.warn(
'You have an unstaged config file and have specified the '
@@ -166,8 +167,7 @@
)
return 1
- # Don't stash if specified or files are specified
- if args.no_stash or args.all_files or args.files:
+ if no_stash:
ctx = noop_context()
else:
ctx = staged_files_only(runner.cmd_runner)
| {"golden_diff": "diff --git a/pre_commit/commands/run.py b/pre_commit/commands/run.py\n--- a/pre_commit/commands/run.py\n+++ b/pre_commit/commands/run.py\n@@ -139,6 +139,7 @@\n \n \n def run(runner, args, write=sys_stdout_write_wrapper, environ=os.environ):\n+ no_stash = args.no_stash or args.all_files or bool(args.files)\n # Set up our logging handler\n logger.addHandler(LoggingHandler(args.color, write=write))\n logger.setLevel(logging.INFO)\n@@ -150,7 +151,7 @@\n if bool(args.source) != bool(args.origin):\n logger.error('Specify both --origin and --source.')\n return 1\n- if _has_unstaged_config(runner) and not args.no_stash:\n+ if _has_unstaged_config(runner) and not no_stash:\n if args.allow_unstaged_config:\n logger.warn(\n 'You have an unstaged config file and have specified the '\n@@ -166,8 +167,7 @@\n )\n return 1\n \n- # Don't stash if specified or files are specified\n- if args.no_stash or args.all_files or args.files:\n+ if no_stash:\n ctx = noop_context()\n else:\n ctx = staged_files_only(runner.cmd_runner)\n", "issue": "Unstaged check should not complain when running --all-files\n```\n$ pre-commit run --all-files\n[ERROR] Your .pre-commit-config.yaml is unstaged.\n`git add .pre-commit-config.yaml` to fix this.\nRun pre-commit with --allow-unstaged-config to silence this.\n```\n\n", "before_files": [{"content": "from __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport logging\nimport os\nimport sys\n\nfrom pre_commit import color\nfrom pre_commit import git\nfrom pre_commit.logging_handler import LoggingHandler\nfrom pre_commit.output import get_hook_message\nfrom pre_commit.output import sys_stdout_write_wrapper\nfrom pre_commit.staged_files_only import staged_files_only\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import noop_context\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef _get_skips(environ):\n skips = environ.get('SKIP', '')\n return set(skip.strip() for skip in skips.split(',') if skip.strip())\n\n\ndef _hook_msg_start(hook, verbose):\n return '{0}{1}'.format(\n '[{0}] '.format(hook['id']) if verbose else '',\n hook['name'],\n )\n\n\ndef _print_no_files_skipped(hook, write, args):\n write(get_hook_message(\n _hook_msg_start(hook, args.verbose),\n postfix='(no files to check) ',\n end_msg='Skipped',\n end_color=color.TURQUOISE,\n use_color=args.color,\n ))\n\n\ndef _print_user_skipped(hook, write, args):\n write(get_hook_message(\n _hook_msg_start(hook, args.verbose),\n end_msg='Skipped',\n end_color=color.YELLOW,\n use_color=args.color,\n ))\n\n\ndef get_changed_files(new, old):\n return cmd_output(\n 'git', 'diff', '--name-only', '{0}..{1}'.format(old, new),\n )[1].splitlines()\n\n\ndef get_filenames(args, include_expr, exclude_expr):\n if args.origin and args.source:\n getter = git.get_files_matching(\n lambda: get_changed_files(args.origin, args.source),\n )\n elif args.files:\n getter = git.get_files_matching(lambda: args.files)\n elif args.all_files:\n getter = git.get_all_files_matching\n elif git.is_in_merge_conflict():\n getter = git.get_conflicted_files_matching\n else:\n getter = git.get_staged_files_matching\n return getter(include_expr, exclude_expr)\n\n\ndef _run_single_hook(hook, repo, args, write, skips=frozenset()):\n filenames = get_filenames(args, hook['files'], hook['exclude'])\n if hook['id'] in skips:\n _print_user_skipped(hook, write, args)\n return 0\n elif not filenames:\n _print_no_files_skipped(hook, write, args)\n return 0\n\n # Print the hook and the dots first in case the hook 
takes hella long to\n # run.\n write(get_hook_message(_hook_msg_start(hook, args.verbose), end_len=6))\n sys.stdout.flush()\n\n retcode, stdout, stderr = repo.run_hook(hook, filenames)\n\n if retcode != hook['expected_return_value']:\n retcode = 1\n print_color = color.RED\n pass_fail = 'Failed'\n else:\n retcode = 0\n print_color = color.GREEN\n pass_fail = 'Passed'\n\n write(color.format_color(pass_fail, print_color, args.color) + '\\n')\n\n if (stdout or stderr) and (retcode or args.verbose):\n write('hookid: {0}\\n'.format(hook['id']))\n write('\\n')\n for output in (stdout, stderr):\n if output.strip():\n write(output.strip() + '\\n')\n write('\\n')\n\n return retcode\n\n\ndef _run_hooks(repo_hooks, args, write, environ):\n \"\"\"Actually run the hooks.\"\"\"\n skips = _get_skips(environ)\n retval = 0\n for repo, hook in repo_hooks:\n retval |= _run_single_hook(hook, repo, args, write, skips)\n return retval\n\n\ndef get_repo_hooks(runner):\n for repo in runner.repositories:\n for _, hook in repo.hooks:\n yield (repo, hook)\n\n\ndef _has_unmerged_paths(runner):\n _, stdout, _ = runner.cmd_runner.run(['git', 'ls-files', '--unmerged'])\n return bool(stdout.strip())\n\n\ndef _has_unstaged_config(runner):\n retcode, _, _ = runner.cmd_runner.run(\n ('git', 'diff', '--exit-code', runner.config_file_path),\n retcode=None,\n )\n # be explicit, other git errors don't mean it has an unstaged config.\n return retcode == 1\n\n\ndef run(runner, args, write=sys_stdout_write_wrapper, environ=os.environ):\n # Set up our logging handler\n logger.addHandler(LoggingHandler(args.color, write=write))\n logger.setLevel(logging.INFO)\n\n # Check if we have unresolved merge conflict files and fail fast.\n if _has_unmerged_paths(runner):\n logger.error('Unmerged files. Resolve before committing.')\n return 1\n if bool(args.source) != bool(args.origin):\n logger.error('Specify both --origin and --source.')\n return 1\n if _has_unstaged_config(runner) and not args.no_stash:\n if args.allow_unstaged_config:\n logger.warn(\n 'You have an unstaged config file and have specified the '\n '--allow-unstaged-config option.\\n'\n 'Note that your config will be stashed before the config is '\n 'parsed unless --no-stash is specified.',\n )\n else:\n logger.error(\n 'Your .pre-commit-config.yaml is unstaged.\\n'\n '`git add .pre-commit-config.yaml` to fix this.\\n'\n 'Run pre-commit with --allow-unstaged-config to silence this.'\n )\n return 1\n\n # Don't stash if specified or files are specified\n if args.no_stash or args.all_files or args.files:\n ctx = noop_context()\n else:\n ctx = staged_files_only(runner.cmd_runner)\n\n with ctx:\n repo_hooks = list(get_repo_hooks(runner))\n if args.hook:\n repo_hooks = [\n (repo, hook) for repo, hook in repo_hooks\n if hook['id'] == args.hook\n ]\n if not repo_hooks:\n write('No hook with id `{0}`\\n'.format(args.hook))\n return 1\n return _run_hooks(repo_hooks, args, write, environ)\n", "path": "pre_commit/commands/run.py"}]} | 2,437 | 296 |
gh_patches_debug_3949 | rasdani/github-patches | git_diff | ManageIQ__integration_tests-3999 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
String interpolation with no params in utils.db
https://github.com/RedHatQE/cfme_tests/blob/master/utils/db.py#L297
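For illustration only: the logging call referenced above passes a format string containing `%s` but never supplies a value for it, so the placeholder is left unfilled. A minimal sketch of the problem and one plausible correction, assuming `table_name` is the value that was meant to be interpolated:

```python
import logging

logger = logging.getLogger(__name__)
table_name = "vms"  # hypothetical value, used only for this example

# Buggy call: no argument is given for "%s", so the literal placeholder is logged.
logger.info('Unable to create table class for table "%s"')

# One plausible fix: pass the missing argument (or format the string directly).
logger.info('Unable to create table class for table "%s"', table_name)
```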
</issue>
<code>
[start of utils/db.py]
1 from collections import Mapping
2 from contextlib import contextmanager
3 from itertools import izip
4
5 from cached_property import cached_property
6 from sqlalchemy import MetaData, create_engine, event, inspect
7 from sqlalchemy.exc import ArgumentError, DisconnectionError, InvalidRequestError
8 from sqlalchemy.ext.declarative import declarative_base
9 from sqlalchemy.orm import sessionmaker
10 from sqlalchemy.pool import Pool
11
12 from fixtures.pytest_store import store
13 from utils import conf, ports, version
14 from utils.log import logger
15
16
17 @event.listens_for(Pool, "checkout")
18 def ping_connection(dbapi_connection, connection_record, connection_proxy):
19 """ping_connection event hook, used to reconnect db sessions that time out
20
21 Note:
22
23 See also: :ref:`Connection Invalidation <sqlalchemy:pool_connection_invalidation>`
24
25 """
26 cursor = dbapi_connection.cursor()
27 try:
28 cursor.execute("SELECT 1")
29 except StandardError:
30 raise DisconnectionError
31 cursor.close()
32
33
34 def scl_name():
35 # postgres's version is in the service name and file paths when we pull it from SCL,
36 # so this is a little resolver to help keep the version picking centralized
37 return version.pick({
38 version.LOWEST: 'postgresql92',
39 '5.5': 'rh-postgresql94',
40 '5.7': 'rh-postgresql95'
41 })
42
43
44 class Db(Mapping):
45 """Helper class for interacting with a CFME database using SQLAlchemy
46
47 Args:
48 hostname: base url to be used (default is from current_appliance)
49 credentials: name of credentials to use from :py:attr:`utils.conf.credentials`
50 (default ``database``)
51
52 Provides convient attributes to common sqlalchemy objects related to this DB,
53 as well as a Mapping interface to access and reflect database tables. Where possible,
54 attributes are cached.
55
56 Db objects support getting tables by name via the mapping interface::
57
58 table = db['table_name']
59
60 Usage:
61
62 # Usually used to query the DB for info, here's a common query
63 for vm in db.session.query(db['vms']).all():
64 print(vm.name)
65 print(vm.guid)
66
67 # List comprehension to get all templates
68 [(vm.name, vm.guid) for vm in session.query(db['vms']).all() if vm.template is True]
69
70 # Use the transaction manager for write operations:
71 with db.transaction:
72 db.session.query(db['vms']).all().delete()
73
74 Note:
75
76 Creating a table object requires a call to the database so that SQLAlchemy can do
77 reflection to determine the table's structure (columns, keys, indices, etc). On
78 a latent connection, this can be extremely slow, which will affect methods that return
79 tables, like the mapping interface or :py:meth:`values`.
80
81 """
82 def __init__(self, hostname=None, credentials=None):
83 self._table_cache = {}
84 if hostname is None:
85 self.hostname = store.current_appliance.db_address
86 else:
87 self.hostname = hostname
88
89 self.credentials = credentials or conf.credentials['database']
90
91 def __getitem__(self, table_name):
92 """Access tables as items contained in this db
93
94 Usage:
95
96 # To get a table called 'table_name':
97 db['table_name']
98
99 This may return ``None`` in the case where a table is found but reflection fails.
100
101 """
102 try:
103 return self._table(table_name)
104 except InvalidRequestError:
105 raise KeyError('Table {} could not be found'.format(table_name))
106
107 def __iter__(self):
108 """Iterator of table names in this db"""
109 return self.keys()
110
111 def __len__(self):
112 """Number of tables in this db"""
113 return len(self.table_names)
114
115 def __contains__(self, table_name):
116 """Whether or not the named table is in this db"""
117 return table_name in self.table_names
118
119 def keys(self):
120 """Iterator of table names in this db"""
121 return (table_name for table_name in self.table_names)
122
123 def items(self):
124 """Iterator of ``(table_name, table)`` pairs"""
125 return izip(self.keys(), self.values())
126
127 def values(self):
128 """Iterator of tables in this db"""
129 return (self[table_name] for table_name in self.table_names)
130
131 def get(self, table_name, default=None):
132 """table getter
133
134 Args:
135 table_name: Name of the table to get
136 default: Default value to return if ``table_name`` is not found.
137
138 Returns: a table if ``table_name`` exists, otherwise 'None' or the passed-in default
139
140 """
141 try:
142 return self[table_name]
143 except KeyError:
144 return default
145
146 def copy(self):
147 """Copy this database instance, keeping the same credentials and hostname"""
148 return type(self)(self.hostname, self.credentials)
149
150 def __eq__(self, other):
151 """Check if this db is equal to another db"""
152 try:
153 return self.hostname == other.hostname
154 except:
155 return False
156
157 def __ne__(self, other):
158 """Check if this db is not equal to another db"""
159 return not self == other
160
161 @cached_property
162 def engine(self):
163 """The :py:class:`Engine <sqlalchemy:sqlalchemy.engine.Engine>` for this database
164
165 It uses pessimistic disconnection handling, checking that the database is still
166 connected before executing commands.
167
168 """
169 return create_engine(self.db_url, echo_pool=True)
170
171 @cached_property
172 def sessionmaker(self):
173 """A :py:class:`sessionmaker <sqlalchemy:sqlalchemy.orm.session.sessionmaker>`
174
175 Used to make new sessions with this database, as needed.
176
177 """
178 return sessionmaker(bind=self.engine)
179
180 @cached_property
181 def table_base(self):
182 """Base class for all tables returned by this database
183
184 This base class is created using
185 :py:class:`declarative_base <sqlalchemy:sqlalchemy.ext.declarative.declarative_base>`.
186 """
187 return declarative_base(metadata=self.metadata)
188
189 @cached_property
190 def metadata(self):
191 """:py:class:`MetaData <sqlalchemy:sqlalchemy.schema.MetaData>` for this database
192
193 This can be used for introspection of reflected items.
194
195 Note:
196
197 Tables that haven't been reflected won't show up in metadata. To reflect a table,
198 use :py:meth:`reflect_table`.
199
200 """
201 return MetaData(bind=self.engine)
202
203 @cached_property
204 def db_url(self):
205 """The connection URL for this database, including credentials"""
206 template = "postgresql://{username}:{password}@{host}:{port}/vmdb_production"
207 result = template.format(host=self.hostname, port=ports.DB, **self.credentials)
208 logger.info("[DB] db_url is %s", result)
209 return result
210
211 @cached_property
212 def table_names(self):
213 """A sorted list of table names available in this database."""
214 # rails table names follow similar rules as pep8 identifiers; expose them as such
215 return sorted(inspect(self.engine).get_table_names())
216
217 @cached_property
218 def session(self):
219 """Returns a :py:class:`Session <sqlalchemy:sqlalchemy.orm.session.Session>`
220
221 This is used for database queries. For writing to the database, start a
222 :py:meth:`transaction`.
223
224 Note:
225
226 This attribute is cached. In cases where a new session needs to be explicitly created,
227 use :py:meth:`sessionmaker`.
228
229 """
230 return self.sessionmaker(autocommit=True)
231
232 @property
233 @contextmanager
234 def transaction(self):
235 """Context manager for simple transaction management
236
237 Sessions understand the concept of transactions, and provider context managers to
238 handle conditionally committing or rolling back transactions as needed.
239
240 Note:
241
242 Sessions automatically commit transactions by default. For predictable results when
243 writing to the database, use the transaction manager.
244
245 Usage:
246
247 with db.transaction:
248 db.session.do_something()
249
250 """
251 with self.session.begin():
252 yield
253
254 def reflect_table(self, table_name):
255 """Populate :py:attr:`metadata` with information on a table
256
257 Args:
258 table_name: The name of a table to reflect
259
260 """
261 self.metadata.reflect(only=[table_name])
262
263 def _table(self, table_name):
264 """Retrieves, reflects, and caches table objects
265
266 Actual implementation of __getitem__
267 """
268 try:
269 return self._table_cache[table_name]
270 except KeyError:
271 self.reflect_table(table_name)
272 table = self.metadata.tables[table_name]
273 table_dict = {
274 '__table__': table,
275 '__tablename__': table_name
276 }
277
278 try:
279 table_cls = type(str(table_name), (self.table_base,), table_dict)
280 self._table_cache[table_name] = table_cls
281 return table_cls
282 except ArgumentError:
283 # This usually happens on join tables with no PKs
284 logger.info('Unable to create table class for table "%s"')
285 return None
286
287
288 @contextmanager
289 def database_on_server(hostname, **kwargs):
290 db_obj = Db(hostname=hostname, **kwargs)
291 yield db_obj
292
293
294 #: :py:class:`Db` instance configured with default settings from conf yamls
295 def cfmedb():
296 return store.current_appliance.db
297
[end of utils/db.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/utils/db.py b/utils/db.py
--- a/utils/db.py
+++ b/utils/db.py
@@ -281,7 +281,7 @@
return table_cls
except ArgumentError:
# This usually happens on join tables with no PKs
- logger.info('Unable to create table class for table "%s"')
+ logger.info('Unable to create table class for table "{}"'.format(table_name))
return None
| {"golden_diff": "diff --git a/utils/db.py b/utils/db.py\n--- a/utils/db.py\n+++ b/utils/db.py\n@@ -281,7 +281,7 @@\n return table_cls\n except ArgumentError:\n # This usually happens on join tables with no PKs\n- logger.info('Unable to create table class for table \"%s\"')\n+ logger.info('Unable to create table class for table \"{}\"'.format(table_name))\n return None\n", "issue": "String interpolation with no params in utils.db\nhttps://github.com/RedHatQE/cfme_tests/blob/master/utils/db.py#L297\n\n", "before_files": [{"content": "from collections import Mapping\nfrom contextlib import contextmanager\nfrom itertools import izip\n\nfrom cached_property import cached_property\nfrom sqlalchemy import MetaData, create_engine, event, inspect\nfrom sqlalchemy.exc import ArgumentError, DisconnectionError, InvalidRequestError\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.pool import Pool\n\nfrom fixtures.pytest_store import store\nfrom utils import conf, ports, version\nfrom utils.log import logger\n\n\[email protected]_for(Pool, \"checkout\")\ndef ping_connection(dbapi_connection, connection_record, connection_proxy):\n \"\"\"ping_connection event hook, used to reconnect db sessions that time out\n\n Note:\n\n See also: :ref:`Connection Invalidation <sqlalchemy:pool_connection_invalidation>`\n\n \"\"\"\n cursor = dbapi_connection.cursor()\n try:\n cursor.execute(\"SELECT 1\")\n except StandardError:\n raise DisconnectionError\n cursor.close()\n\n\ndef scl_name():\n # postgres's version is in the service name and file paths when we pull it from SCL,\n # so this is a little resolver to help keep the version picking centralized\n return version.pick({\n version.LOWEST: 'postgresql92',\n '5.5': 'rh-postgresql94',\n '5.7': 'rh-postgresql95'\n })\n\n\nclass Db(Mapping):\n \"\"\"Helper class for interacting with a CFME database using SQLAlchemy\n\n Args:\n hostname: base url to be used (default is from current_appliance)\n credentials: name of credentials to use from :py:attr:`utils.conf.credentials`\n (default ``database``)\n\n Provides convient attributes to common sqlalchemy objects related to this DB,\n as well as a Mapping interface to access and reflect database tables. Where possible,\n attributes are cached.\n\n Db objects support getting tables by name via the mapping interface::\n\n table = db['table_name']\n\n Usage:\n\n # Usually used to query the DB for info, here's a common query\n for vm in db.session.query(db['vms']).all():\n print(vm.name)\n print(vm.guid)\n\n # List comprehension to get all templates\n [(vm.name, vm.guid) for vm in session.query(db['vms']).all() if vm.template is True]\n\n # Use the transaction manager for write operations:\n with db.transaction:\n db.session.query(db['vms']).all().delete()\n\n Note:\n\n Creating a table object requires a call to the database so that SQLAlchemy can do\n reflection to determine the table's structure (columns, keys, indices, etc). 
On\n a latent connection, this can be extremely slow, which will affect methods that return\n tables, like the mapping interface or :py:meth:`values`.\n\n \"\"\"\n def __init__(self, hostname=None, credentials=None):\n self._table_cache = {}\n if hostname is None:\n self.hostname = store.current_appliance.db_address\n else:\n self.hostname = hostname\n\n self.credentials = credentials or conf.credentials['database']\n\n def __getitem__(self, table_name):\n \"\"\"Access tables as items contained in this db\n\n Usage:\n\n # To get a table called 'table_name':\n db['table_name']\n\n This may return ``None`` in the case where a table is found but reflection fails.\n\n \"\"\"\n try:\n return self._table(table_name)\n except InvalidRequestError:\n raise KeyError('Table {} could not be found'.format(table_name))\n\n def __iter__(self):\n \"\"\"Iterator of table names in this db\"\"\"\n return self.keys()\n\n def __len__(self):\n \"\"\"Number of tables in this db\"\"\"\n return len(self.table_names)\n\n def __contains__(self, table_name):\n \"\"\"Whether or not the named table is in this db\"\"\"\n return table_name in self.table_names\n\n def keys(self):\n \"\"\"Iterator of table names in this db\"\"\"\n return (table_name for table_name in self.table_names)\n\n def items(self):\n \"\"\"Iterator of ``(table_name, table)`` pairs\"\"\"\n return izip(self.keys(), self.values())\n\n def values(self):\n \"\"\"Iterator of tables in this db\"\"\"\n return (self[table_name] for table_name in self.table_names)\n\n def get(self, table_name, default=None):\n \"\"\"table getter\n\n Args:\n table_name: Name of the table to get\n default: Default value to return if ``table_name`` is not found.\n\n Returns: a table if ``table_name`` exists, otherwise 'None' or the passed-in default\n\n \"\"\"\n try:\n return self[table_name]\n except KeyError:\n return default\n\n def copy(self):\n \"\"\"Copy this database instance, keeping the same credentials and hostname\"\"\"\n return type(self)(self.hostname, self.credentials)\n\n def __eq__(self, other):\n \"\"\"Check if this db is equal to another db\"\"\"\n try:\n return self.hostname == other.hostname\n except:\n return False\n\n def __ne__(self, other):\n \"\"\"Check if this db is not equal to another db\"\"\"\n return not self == other\n\n @cached_property\n def engine(self):\n \"\"\"The :py:class:`Engine <sqlalchemy:sqlalchemy.engine.Engine>` for this database\n\n It uses pessimistic disconnection handling, checking that the database is still\n connected before executing commands.\n\n \"\"\"\n return create_engine(self.db_url, echo_pool=True)\n\n @cached_property\n def sessionmaker(self):\n \"\"\"A :py:class:`sessionmaker <sqlalchemy:sqlalchemy.orm.session.sessionmaker>`\n\n Used to make new sessions with this database, as needed.\n\n \"\"\"\n return sessionmaker(bind=self.engine)\n\n @cached_property\n def table_base(self):\n \"\"\"Base class for all tables returned by this database\n\n This base class is created using\n :py:class:`declarative_base <sqlalchemy:sqlalchemy.ext.declarative.declarative_base>`.\n \"\"\"\n return declarative_base(metadata=self.metadata)\n\n @cached_property\n def metadata(self):\n \"\"\":py:class:`MetaData <sqlalchemy:sqlalchemy.schema.MetaData>` for this database\n\n This can be used for introspection of reflected items.\n\n Note:\n\n Tables that haven't been reflected won't show up in metadata. 
To reflect a table,\n use :py:meth:`reflect_table`.\n\n \"\"\"\n return MetaData(bind=self.engine)\n\n @cached_property\n def db_url(self):\n \"\"\"The connection URL for this database, including credentials\"\"\"\n template = \"postgresql://{username}:{password}@{host}:{port}/vmdb_production\"\n result = template.format(host=self.hostname, port=ports.DB, **self.credentials)\n logger.info(\"[DB] db_url is %s\", result)\n return result\n\n @cached_property\n def table_names(self):\n \"\"\"A sorted list of table names available in this database.\"\"\"\n # rails table names follow similar rules as pep8 identifiers; expose them as such\n return sorted(inspect(self.engine).get_table_names())\n\n @cached_property\n def session(self):\n \"\"\"Returns a :py:class:`Session <sqlalchemy:sqlalchemy.orm.session.Session>`\n\n This is used for database queries. For writing to the database, start a\n :py:meth:`transaction`.\n\n Note:\n\n This attribute is cached. In cases where a new session needs to be explicitly created,\n use :py:meth:`sessionmaker`.\n\n \"\"\"\n return self.sessionmaker(autocommit=True)\n\n @property\n @contextmanager\n def transaction(self):\n \"\"\"Context manager for simple transaction management\n\n Sessions understand the concept of transactions, and provider context managers to\n handle conditionally committing or rolling back transactions as needed.\n\n Note:\n\n Sessions automatically commit transactions by default. For predictable results when\n writing to the database, use the transaction manager.\n\n Usage:\n\n with db.transaction:\n db.session.do_something()\n\n \"\"\"\n with self.session.begin():\n yield\n\n def reflect_table(self, table_name):\n \"\"\"Populate :py:attr:`metadata` with information on a table\n\n Args:\n table_name: The name of a table to reflect\n\n \"\"\"\n self.metadata.reflect(only=[table_name])\n\n def _table(self, table_name):\n \"\"\"Retrieves, reflects, and caches table objects\n\n Actual implementation of __getitem__\n \"\"\"\n try:\n return self._table_cache[table_name]\n except KeyError:\n self.reflect_table(table_name)\n table = self.metadata.tables[table_name]\n table_dict = {\n '__table__': table,\n '__tablename__': table_name\n }\n\n try:\n table_cls = type(str(table_name), (self.table_base,), table_dict)\n self._table_cache[table_name] = table_cls\n return table_cls\n except ArgumentError:\n # This usually happens on join tables with no PKs\n logger.info('Unable to create table class for table \"%s\"')\n return None\n\n\n@contextmanager\ndef database_on_server(hostname, **kwargs):\n db_obj = Db(hostname=hostname, **kwargs)\n yield db_obj\n\n\n#: :py:class:`Db` instance configured with default settings from conf yamls\ndef cfmedb():\n return store.current_appliance.db\n", "path": "utils/db.py"}]} | 3,381 | 99 |
gh_patches_debug_6107 | rasdani/github-patches | git_diff | pytorch__vision-8227 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`CocoDetection` accepts `slice` as argument to its `__getitem__` and returns wrong result
Hi, although `CocoDetection` declares its `__getitem__` argument to be of type `int`, it actually accepts `slice` as well. However it returns a wrong result. As this might be confusing for some users, I suggest to add a type check at the beginning of the `__getitem__`.
https://github.com/pytorch/vision/blob/cab01fc1b7c7f0620ab95c6064f2a3fb583e9bee/torchvision/datasets/coco.py#L46-L49
```python
ds[:2]
```
```
(tensor([[[0.3373, 0.3373, 0.3373, ..., 0.2863, 0.2863, 0.2863],
[0.3373, 0.3373, 0.3373, ..., 0.2863, 0.2863, 0.2863],
[0.3373, 0.3373, 0.3373, ..., 0.2863, 0.2863, 0.2863],
...,
[0.4196, 0.4196, 0.4196, ..., 0.4137, 0.3980, 0.3235],
[0.4196, 0.4196, 0.4196, ..., 0.4137, 0.3980, 0.3235],
[0.4196, 0.4196, 0.4196, ..., 0.4137, 0.3980, 0.3235]],
[[0.1608, 0.1608, 0.1608, ..., 0.1216, 0.1216, 0.1216],
[0.1608, 0.1608, 0.1608, ..., 0.1216, 0.1216, 0.1216],
[0.1608, 0.1608, 0.1608, ..., 0.1216, 0.1216, 0.1216],
...,
[0.5686, 0.5686, 0.5686, ..., 0.5333, 0.5137, 0.4353],
[0.5686, 0.5686, 0.5686, ..., 0.5333, 0.5137, 0.4353],
[0.5686, 0.5686, 0.5686, ..., 0.5333, 0.5137, 0.4353]],
[[0.1412, 0.1412, 0.1412, ..., 0.1608, 0.1608, 0.1608],
[0.1412, 0.1412, 0.1412, ..., 0.1608, 0.1608, 0.1608],
[0.1412, 0.1412, 0.1412, ..., 0.1608, 0.1608, 0.1608],
...,
[0.0902, 0.0902, 0.0902, ..., 0.1333, 0.1451, 0.1020],
[0.0902, 0.0902, 0.0902, ..., 0.1333, 0.1451, 0.1020],
[0.0902, 0.0902, 0.0902, ..., 0.1333, 0.1451, 0.1020]]]),
{'category_id': tensor([2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3,
1]),
'bbox': tensor([[ 390, 240, 58, 92],
[ 391, 319, 66, 99],
[ 663, 331, 54, 106],
[ 796, 324, 50, 102],
[1245, 276, 35, 117],
[ 599, 265, 58, 91],
[ 755, 228, 42, 91],
[ 788, 261, 61, 97],
[ 250, 374, 73, 105],
[ 224, 449, 29, 28],
[ 12, 85, 13, 32],
[ 88, 70, 13, 27],
[ 102, 99, 14, 33],
[ 100, 132, 14, 32],
[ 151, 160, 13, 34],
[ 177, 115, 18, 32],
[ 214, 73, 12, 28],
[ 267, 58, 13, 23],
[ 358, 71, 14, 33],
[ 356, 160, 17, 36],
[ 231, 132, 14, 33],
[ 234, 88, 16, 29],
[ 248, 90, 13, 25],
[ 60, 109, 13, 32],
[ 243, 75, 7, 5]]),
'iscrowd': tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0])})
```
The image in the returned tuple is that of `ds[0]`, while the target is the combined annotations of `ds[0]` and `ds[1]`, which is wrong. This happens because `ds.coco` can accept a list of inputs in its `getAnnIds()`.
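A minimal sketch of the guard suggested above, assuming the check sits at the top of `__getitem__` and that rejecting non-integer indices outright is the desired behaviour (shown out of its class context for brevity):

```python
from typing import Any, Tuple

def __getitem__(self, index: int) -> Tuple[Any, Any]:
    # Refuse slices (and any other non-integer index) up front, so that
    # self.coco.getAnnIds() is never handed a list of image ids.
    if not isinstance(index, int):
        raise ValueError(f"Index must be of type integer, got {type(index)} instead.")

    id = self.ids[index]
    image = self._load_image(id)
    target = self._load_target(id)

    if self.transforms is not None:
        image, target = self.transforms(image, target)

    return image, target
```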
cc @pmeier
</issue>
<code>
[start of torchvision/datasets/coco.py]
1 import os.path
2 from typing import Any, Callable, List, Optional, Tuple
3
4 from PIL import Image
5
6 from .vision import VisionDataset
7
8
9 class CocoDetection(VisionDataset):
10 """`MS Coco Detection <https://cocodataset.org/#detection-2016>`_ Dataset.
11
12 It requires the `COCO API to be installed <https://github.com/pdollar/coco/tree/master/PythonAPI>`_.
13
14 Args:
15 root (string): Root directory where images are downloaded to.
16 annFile (string): Path to json annotation file.
17 transform (callable, optional): A function/transform that takes in a PIL image
18 and returns a transformed version. E.g, ``transforms.PILToTensor``
19 target_transform (callable, optional): A function/transform that takes in the
20 target and transforms it.
21 transforms (callable, optional): A function/transform that takes input sample and its target as entry
22 and returns a transformed version.
23 """
24
25 def __init__(
26 self,
27 root: str,
28 annFile: str,
29 transform: Optional[Callable] = None,
30 target_transform: Optional[Callable] = None,
31 transforms: Optional[Callable] = None,
32 ) -> None:
33 super().__init__(root, transforms, transform, target_transform)
34 from pycocotools.coco import COCO
35
36 self.coco = COCO(annFile)
37 self.ids = list(sorted(self.coco.imgs.keys()))
38
39 def _load_image(self, id: int) -> Image.Image:
40 path = self.coco.loadImgs(id)[0]["file_name"]
41 return Image.open(os.path.join(self.root, path)).convert("RGB")
42
43 def _load_target(self, id: int) -> List[Any]:
44 return self.coco.loadAnns(self.coco.getAnnIds(id))
45
46 def __getitem__(self, index: int) -> Tuple[Any, Any]:
47 id = self.ids[index]
48 image = self._load_image(id)
49 target = self._load_target(id)
50
51 if self.transforms is not None:
52 image, target = self.transforms(image, target)
53
54 return image, target
55
56 def __len__(self) -> int:
57 return len(self.ids)
58
59
60 class CocoCaptions(CocoDetection):
61 """`MS Coco Captions <https://cocodataset.org/#captions-2015>`_ Dataset.
62
63 It requires the `COCO API to be installed <https://github.com/pdollar/coco/tree/master/PythonAPI>`_.
64
65 Args:
66 root (string): Root directory where images are downloaded to.
67 annFile (string): Path to json annotation file.
68 transform (callable, optional): A function/transform that takes in a PIL image
69 and returns a transformed version. E.g, ``transforms.PILToTensor``
70 target_transform (callable, optional): A function/transform that takes in the
71 target and transforms it.
72 transforms (callable, optional): A function/transform that takes input sample and its target as entry
73 and returns a transformed version.
74
75 Example:
76
77 .. code:: python
78
79 import torchvision.datasets as dset
80 import torchvision.transforms as transforms
81 cap = dset.CocoCaptions(root = 'dir where images are',
82 annFile = 'json annotation file',
83 transform=transforms.PILToTensor())
84
85 print('Number of samples: ', len(cap))
86 img, target = cap[3] # load 4th sample
87
88 print("Image Size: ", img.size())
89 print(target)
90
91 Output: ::
92
93 Number of samples: 82783
94 Image Size: (3L, 427L, 640L)
95 [u'A plane emitting smoke stream flying over a mountain.',
96 u'A plane darts across a bright blue sky behind a mountain covered in snow',
97 u'A plane leaves a contrail above the snowy mountain top.',
98 u'A mountain that has a plane flying overheard in the distance.',
99 u'A mountain view with a plume of smoke in the background']
100
101 """
102
103 def _load_target(self, id: int) -> List[str]:
104 return [ann["caption"] for ann in super()._load_target(id)]
105
[end of torchvision/datasets/coco.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/torchvision/datasets/coco.py b/torchvision/datasets/coco.py
--- a/torchvision/datasets/coco.py
+++ b/torchvision/datasets/coco.py
@@ -44,6 +44,10 @@
return self.coco.loadAnns(self.coco.getAnnIds(id))
def __getitem__(self, index: int) -> Tuple[Any, Any]:
+
+ if not isinstance(index, int):
+ raise ValueError(f"Index must be of type integer, got {type(index)} instead.")
+
id = self.ids[index]
image = self._load_image(id)
target = self._load_target(id)
| {"golden_diff": "diff --git a/torchvision/datasets/coco.py b/torchvision/datasets/coco.py\n--- a/torchvision/datasets/coco.py\n+++ b/torchvision/datasets/coco.py\n@@ -44,6 +44,10 @@\n return self.coco.loadAnns(self.coco.getAnnIds(id))\n \n def __getitem__(self, index: int) -> Tuple[Any, Any]:\n+\n+ if not isinstance(index, int):\n+ raise ValueError(f\"Index must be of type integer, got {type(index)} instead.\")\n+\n id = self.ids[index]\n image = self._load_image(id)\n target = self._load_target(id)\n", "issue": "\u200d\u200d\u200d\u200d`CocoDetection` accepts `slice` as argument to its `__getitem__` and returns wrong result\nHi, although `CocoDetection` declares its `__getitem__` argument to be of type `int`, it actually accepts `slice` as well. However it returns a wrong result. As this might be confusing for some users, I suggest to add a type check at the beginning of the `__getitem__`.\r\nhttps://github.com/pytorch/vision/blob/cab01fc1b7c7f0620ab95c6064f2a3fb583e9bee/torchvision/datasets/coco.py#L46-L49\r\n\r\n```python\r\nds[:2]\r\n```\r\n```\r\n(tensor([[[0.3373, 0.3373, 0.3373, ..., 0.2863, 0.2863, 0.2863],\r\n [0.3373, 0.3373, 0.3373, ..., 0.2863, 0.2863, 0.2863],\r\n [0.3373, 0.3373, 0.3373, ..., 0.2863, 0.2863, 0.2863],\r\n ...,\r\n [0.4196, 0.4196, 0.4196, ..., 0.4137, 0.3980, 0.3235],\r\n [0.4196, 0.4196, 0.4196, ..., 0.4137, 0.3980, 0.3235],\r\n [0.4196, 0.4196, 0.4196, ..., 0.4137, 0.3980, 0.3235]],\r\n \r\n [[0.1608, 0.1608, 0.1608, ..., 0.1216, 0.1216, 0.1216],\r\n [0.1608, 0.1608, 0.1608, ..., 0.1216, 0.1216, 0.1216],\r\n [0.1608, 0.1608, 0.1608, ..., 0.1216, 0.1216, 0.1216],\r\n ...,\r\n [0.5686, 0.5686, 0.5686, ..., 0.5333, 0.5137, 0.4353],\r\n [0.5686, 0.5686, 0.5686, ..., 0.5333, 0.5137, 0.4353],\r\n [0.5686, 0.5686, 0.5686, ..., 0.5333, 0.5137, 0.4353]],\r\n \r\n [[0.1412, 0.1412, 0.1412, ..., 0.1608, 0.1608, 0.1608],\r\n [0.1412, 0.1412, 0.1412, ..., 0.1608, 0.1608, 0.1608],\r\n [0.1412, 0.1412, 0.1412, ..., 0.1608, 0.1608, 0.1608],\r\n ...,\r\n [0.0902, 0.0902, 0.0902, ..., 0.1333, 0.1451, 0.1020],\r\n [0.0902, 0.0902, 0.0902, ..., 0.1333, 0.1451, 0.1020],\r\n [0.0902, 0.0902, 0.0902, ..., 0.1333, 0.1451, 0.1020]]]),\r\n {'category_id': tensor([2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3,\r\n 1]),\r\n 'bbox': tensor([[ 390, 240, 58, 92],\r\n [ 391, 319, 66, 99],\r\n [ 663, 331, 54, 106],\r\n [ 796, 324, 50, 102],\r\n [1245, 276, 35, 117],\r\n [ 599, 265, 58, 91],\r\n [ 755, 228, 42, 91],\r\n [ 788, 261, 61, 97],\r\n [ 250, 374, 73, 105],\r\n [ 224, 449, 29, 28],\r\n [ 12, 85, 13, 32],\r\n [ 88, 70, 13, 27],\r\n [ 102, 99, 14, 33],\r\n [ 100, 132, 14, 32],\r\n [ 151, 160, 13, 34],\r\n [ 177, 115, 18, 32],\r\n [ 214, 73, 12, 28],\r\n [ 267, 58, 13, 23],\r\n [ 358, 71, 14, 33],\r\n [ 356, 160, 17, 36],\r\n [ 231, 132, 14, 33],\r\n [ 234, 88, 16, 29],\r\n [ 248, 90, 13, 25],\r\n [ 60, 109, 13, 32],\r\n [ 243, 75, 7, 5]]),\r\n 'iscrowd': tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\r\n 0])})\r\n```\r\nThe image in the returned tuple is that of the `ds[0]` and the target is the combined annotations of `ds[0]` and `ds[1]` which is wrong. 
This is due to the fact that `ds.coco` could work with a list of inputs in its `getAnnIds()`.\n\ncc @pmeier\n", "before_files": [{"content": "import os.path\nfrom typing import Any, Callable, List, Optional, Tuple\n\nfrom PIL import Image\n\nfrom .vision import VisionDataset\n\n\nclass CocoDetection(VisionDataset):\n \"\"\"`MS Coco Detection <https://cocodataset.org/#detection-2016>`_ Dataset.\n\n It requires the `COCO API to be installed <https://github.com/pdollar/coco/tree/master/PythonAPI>`_.\n\n Args:\n root (string): Root directory where images are downloaded to.\n annFile (string): Path to json annotation file.\n transform (callable, optional): A function/transform that takes in a PIL image\n and returns a transformed version. E.g, ``transforms.PILToTensor``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n transforms (callable, optional): A function/transform that takes input sample and its target as entry\n and returns a transformed version.\n \"\"\"\n\n def __init__(\n self,\n root: str,\n annFile: str,\n transform: Optional[Callable] = None,\n target_transform: Optional[Callable] = None,\n transforms: Optional[Callable] = None,\n ) -> None:\n super().__init__(root, transforms, transform, target_transform)\n from pycocotools.coco import COCO\n\n self.coco = COCO(annFile)\n self.ids = list(sorted(self.coco.imgs.keys()))\n\n def _load_image(self, id: int) -> Image.Image:\n path = self.coco.loadImgs(id)[0][\"file_name\"]\n return Image.open(os.path.join(self.root, path)).convert(\"RGB\")\n\n def _load_target(self, id: int) -> List[Any]:\n return self.coco.loadAnns(self.coco.getAnnIds(id))\n\n def __getitem__(self, index: int) -> Tuple[Any, Any]:\n id = self.ids[index]\n image = self._load_image(id)\n target = self._load_target(id)\n\n if self.transforms is not None:\n image, target = self.transforms(image, target)\n\n return image, target\n\n def __len__(self) -> int:\n return len(self.ids)\n\n\nclass CocoCaptions(CocoDetection):\n \"\"\"`MS Coco Captions <https://cocodataset.org/#captions-2015>`_ Dataset.\n\n It requires the `COCO API to be installed <https://github.com/pdollar/coco/tree/master/PythonAPI>`_.\n\n Args:\n root (string): Root directory where images are downloaded to.\n annFile (string): Path to json annotation file.\n transform (callable, optional): A function/transform that takes in a PIL image\n and returns a transformed version. E.g, ``transforms.PILToTensor``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n transforms (callable, optional): A function/transform that takes input sample and its target as entry\n and returns a transformed version.\n\n Example:\n\n .. 
code:: python\n\n import torchvision.datasets as dset\n import torchvision.transforms as transforms\n cap = dset.CocoCaptions(root = 'dir where images are',\n annFile = 'json annotation file',\n transform=transforms.PILToTensor())\n\n print('Number of samples: ', len(cap))\n img, target = cap[3] # load 4th sample\n\n print(\"Image Size: \", img.size())\n print(target)\n\n Output: ::\n\n Number of samples: 82783\n Image Size: (3L, 427L, 640L)\n [u'A plane emitting smoke stream flying over a mountain.',\n u'A plane darts across a bright blue sky behind a mountain covered in snow',\n u'A plane leaves a contrail above the snowy mountain top.',\n u'A mountain that has a plane flying overheard in the distance.',\n u'A mountain view with a plume of smoke in the background']\n\n \"\"\"\n\n def _load_target(self, id: int) -> List[str]:\n return [ann[\"caption\"] for ann in super()._load_target(id)]\n", "path": "torchvision/datasets/coco.py"}]} | 3,584 | 147 |
gh_patches_debug_43254 | rasdani/github-patches | git_diff | streamlink__streamlink-4759 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
plugins.atresplayer: Live streams are not working.
### Checklist
- [X] This is a plugin issue and not a different kind of issue
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)
- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Streamlink version
Latest stable release
### Description
As of today, Atresplayer live streams are not working.
### Debug log
```text
[cli][debug] OS: Windows 10
[cli][debug] Python: 3.10.6
[cli][debug] Streamlink: 4.3.0
[cli][debug] Dependencies:
[cli][debug] isodate: 0.6.1
[cli][debug] lxml: 4.9.1
[cli][debug] pycountry: 22.3.5
[cli][debug] pycryptodome: 3.15.0
[cli][debug] PySocks: 1.7.1
[cli][debug] requests: 2.28.1
[cli][debug] websocket-client: 1.3.3
[cli][debug] Arguments:
[cli][debug] url=https://www.atresplayer.com/directos/antena3/
[cli][debug] stream=['best']
[cli][debug] --loglevel=debug
[cli][debug] --hls-live-edge=1
[cli][debug] --ffmpeg-ffmpeg=C:\Program Files\Streamlink\ffmpeg\ffmpeg.exe
[cli][info] Found matching plugin atresplayer for URL https://www.atresplayer.com/directos/antena3/
error: Unable to validate response text: ValidationError(NoneOrAllSchema):
ValidationError(type):
Type of <generator object search_dict at 0x000002C64BA79930> should be list, but is generator
```
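The last line of the log points at the cause: `search_dict` apparently yields its matches lazily, so the schema receives a generator where it expects a list. A tiny, self-contained illustration of that mismatch (the nested data and key are simplified stand-ins, not the plugin's real payload):

```python
def search_dict(data, key):
    # Simplified stand-in for a recursive "find all values for this key" helper
    # implemented as a generator function.
    if isinstance(data, dict):
        for k, v in data.items():
            if k == key:
                yield v
            else:
                yield from search_dict(v, key)
    elif isinstance(data, list):
        for item in data:
            yield from search_dict(item, key)

result = search_dict({"page": {"href": "https://example.com/api"}}, "href")
print(isinstance(result, list))  # False -- it is a generator, which the schema rejects
print(list(result))              # ['https://example.com/api']
```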
</issue>
<code>
[start of src/streamlink/plugins/atresplayer.py]
1 """
2 $description Spanish live TV channels from Atresmedia Television, including Antena 3 and laSexta.
3 $url atresplayer.com
4 $type live
5 $region Spain
6 """
7
8 import logging
9 import re
10
11 from streamlink.plugin import Plugin, pluginmatcher
12 from streamlink.plugin.api import validate
13 from streamlink.stream.dash import DASHStream
14 from streamlink.stream.hls import HLSStream
15 from streamlink.utils.data import search_dict
16 from streamlink.utils.url import update_scheme
17
18 log = logging.getLogger(__name__)
19
20
21 @pluginmatcher(re.compile(
22 r"https?://(?:www\.)?atresplayer\.com/"
23 ))
24 class AtresPlayer(Plugin):
25 def _get_streams(self):
26 self.url = update_scheme("https://", self.url)
27
28 api_url = self.session.http.get(self.url, schema=validate.Schema(
29 re.compile(r"""window.__PRELOADED_STATE__\s*=\s*({.*?});""", re.DOTALL),
30 validate.none_or_all(
31 validate.get(1),
32 validate.parse_json(),
33 validate.transform(search_dict, key="href"),
34 [validate.url()],
35 validate.get(0),
36 ),
37 ))
38 if not api_url:
39 return
40 log.debug(f"API URL: {api_url}")
41
42 player_api_url = self.session.http.get(api_url, schema=validate.Schema(
43 validate.parse_json(),
44 validate.transform(search_dict, key="urlVideo"),
45 ))
46
47 stream_schema = validate.Schema(
48 validate.parse_json(),
49 {
50 "sources": [
51 validate.all(
52 {
53 "src": validate.url(),
54 validate.optional("type"): str,
55 },
56 ),
57 ],
58 },
59 validate.get("sources"),
60 )
61
62 for api_url in player_api_url:
63 log.debug(f"Player API URL: {api_url}")
64 for source in self.session.http.get(api_url, schema=stream_schema):
65 log.debug(f"Stream source: {source['src']} ({source.get('type', 'n/a')})")
66
67 if "type" not in source or source["type"] == "application/vnd.apple.mpegurl":
68 streams = HLSStream.parse_variant_playlist(self.session, source["src"])
69 if not streams:
70 yield "live", HLSStream(self.session, source["src"])
71 else:
72 yield from streams.items()
73 elif source["type"] == "application/dash+xml":
74 yield from DASHStream.parse_manifest(self.session, source["src"]).items()
75
76
77 __plugin__ = AtresPlayer
78
[end of src/streamlink/plugins/atresplayer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/streamlink/plugins/atresplayer.py b/src/streamlink/plugins/atresplayer.py
--- a/src/streamlink/plugins/atresplayer.py
+++ b/src/streamlink/plugins/atresplayer.py
@@ -7,12 +7,12 @@
import logging
import re
+from urllib.parse import urlparse
from streamlink.plugin import Plugin, pluginmatcher
from streamlink.plugin.api import validate
from streamlink.stream.dash import DASHStream
from streamlink.stream.hls import HLSStream
-from streamlink.utils.data import search_dict
from streamlink.utils.url import update_scheme
log = logging.getLogger(__name__)
@@ -24,15 +24,15 @@
class AtresPlayer(Plugin):
def _get_streams(self):
self.url = update_scheme("https://", self.url)
+ path = urlparse(self.url).path
api_url = self.session.http.get(self.url, schema=validate.Schema(
re.compile(r"""window.__PRELOADED_STATE__\s*=\s*({.*?});""", re.DOTALL),
validate.none_or_all(
validate.get(1),
validate.parse_json(),
- validate.transform(search_dict, key="href"),
- [validate.url()],
- validate.get(0),
+ {"links": {path: {"href": validate.url()}}},
+ validate.get(("links", path, "href")),
),
))
if not api_url:
@@ -41,37 +41,46 @@
player_api_url = self.session.http.get(api_url, schema=validate.Schema(
validate.parse_json(),
- validate.transform(search_dict, key="urlVideo"),
+ {"urlVideo": validate.url()},
+ validate.get("urlVideo"),
))
- stream_schema = validate.Schema(
+ log.debug(f"Player API URL: {player_api_url}")
+ sources = self.session.http.get(player_api_url, acceptable_status=(200, 403), schema=validate.Schema(
validate.parse_json(),
- {
- "sources": [
- validate.all(
- {
- "src": validate.url(),
- validate.optional("type"): str,
- },
- ),
- ],
- },
- validate.get("sources"),
- )
+ validate.any(
+ {
+ "error": str,
+ "error_description": str,
+ },
+ {
+ "sources": [
+ validate.all(
+ {
+ "src": validate.url(),
+ validate.optional("type"): str,
+ },
+ validate.union_get("type", "src"),
+ ),
+ ],
+ },
+ ),
+ ))
+ if "error" in sources:
+ log.error(f"Player API error: {sources['error']} - {sources['error_description']}")
+ return
- for api_url in player_api_url:
- log.debug(f"Player API URL: {api_url}")
- for source in self.session.http.get(api_url, schema=stream_schema):
- log.debug(f"Stream source: {source['src']} ({source.get('type', 'n/a')})")
+ for streamtype, streamsrc in sources.get("sources"):
+ log.debug(f"Stream source: {streamsrc} ({streamtype or 'n/a'})")
- if "type" not in source or source["type"] == "application/vnd.apple.mpegurl":
- streams = HLSStream.parse_variant_playlist(self.session, source["src"])
- if not streams:
- yield "live", HLSStream(self.session, source["src"])
- else:
- yield from streams.items()
- elif source["type"] == "application/dash+xml":
- yield from DASHStream.parse_manifest(self.session, source["src"]).items()
+ if streamtype == "application/vnd.apple.mpegurl":
+ streams = HLSStream.parse_variant_playlist(self.session, streamsrc)
+ if not streams:
+ yield "live", HLSStream(self.session, streamsrc)
+ else:
+ yield from streams.items()
+ elif streamtype == "application/dash+xml":
+ yield from DASHStream.parse_manifest(self.session, streamsrc).items()
__plugin__ = AtresPlayer
| {"golden_diff": "diff --git a/src/streamlink/plugins/atresplayer.py b/src/streamlink/plugins/atresplayer.py\n--- a/src/streamlink/plugins/atresplayer.py\n+++ b/src/streamlink/plugins/atresplayer.py\n@@ -7,12 +7,12 @@\n \n import logging\n import re\n+from urllib.parse import urlparse\n \n from streamlink.plugin import Plugin, pluginmatcher\n from streamlink.plugin.api import validate\n from streamlink.stream.dash import DASHStream\n from streamlink.stream.hls import HLSStream\n-from streamlink.utils.data import search_dict\n from streamlink.utils.url import update_scheme\n \n log = logging.getLogger(__name__)\n@@ -24,15 +24,15 @@\n class AtresPlayer(Plugin):\n def _get_streams(self):\n self.url = update_scheme(\"https://\", self.url)\n+ path = urlparse(self.url).path\n \n api_url = self.session.http.get(self.url, schema=validate.Schema(\n re.compile(r\"\"\"window.__PRELOADED_STATE__\\s*=\\s*({.*?});\"\"\", re.DOTALL),\n validate.none_or_all(\n validate.get(1),\n validate.parse_json(),\n- validate.transform(search_dict, key=\"href\"),\n- [validate.url()],\n- validate.get(0),\n+ {\"links\": {path: {\"href\": validate.url()}}},\n+ validate.get((\"links\", path, \"href\")),\n ),\n ))\n if not api_url:\n@@ -41,37 +41,46 @@\n \n player_api_url = self.session.http.get(api_url, schema=validate.Schema(\n validate.parse_json(),\n- validate.transform(search_dict, key=\"urlVideo\"),\n+ {\"urlVideo\": validate.url()},\n+ validate.get(\"urlVideo\"),\n ))\n \n- stream_schema = validate.Schema(\n+ log.debug(f\"Player API URL: {player_api_url}\")\n+ sources = self.session.http.get(player_api_url, acceptable_status=(200, 403), schema=validate.Schema(\n validate.parse_json(),\n- {\n- \"sources\": [\n- validate.all(\n- {\n- \"src\": validate.url(),\n- validate.optional(\"type\"): str,\n- },\n- ),\n- ],\n- },\n- validate.get(\"sources\"),\n- )\n+ validate.any(\n+ {\n+ \"error\": str,\n+ \"error_description\": str,\n+ },\n+ {\n+ \"sources\": [\n+ validate.all(\n+ {\n+ \"src\": validate.url(),\n+ validate.optional(\"type\"): str,\n+ },\n+ validate.union_get(\"type\", \"src\"),\n+ ),\n+ ],\n+ },\n+ ),\n+ ))\n+ if \"error\" in sources:\n+ log.error(f\"Player API error: {sources['error']} - {sources['error_description']}\")\n+ return\n \n- for api_url in player_api_url:\n- log.debug(f\"Player API URL: {api_url}\")\n- for source in self.session.http.get(api_url, schema=stream_schema):\n- log.debug(f\"Stream source: {source['src']} ({source.get('type', 'n/a')})\")\n+ for streamtype, streamsrc in sources.get(\"sources\"):\n+ log.debug(f\"Stream source: {streamsrc} ({streamtype or 'n/a'})\")\n \n- if \"type\" not in source or source[\"type\"] == \"application/vnd.apple.mpegurl\":\n- streams = HLSStream.parse_variant_playlist(self.session, source[\"src\"])\n- if not streams:\n- yield \"live\", HLSStream(self.session, source[\"src\"])\n- else:\n- yield from streams.items()\n- elif source[\"type\"] == \"application/dash+xml\":\n- yield from DASHStream.parse_manifest(self.session, source[\"src\"]).items()\n+ if streamtype == \"application/vnd.apple.mpegurl\":\n+ streams = HLSStream.parse_variant_playlist(self.session, streamsrc)\n+ if not streams:\n+ yield \"live\", HLSStream(self.session, streamsrc)\n+ else:\n+ yield from streams.items()\n+ elif streamtype == \"application/dash+xml\":\n+ yield from DASHStream.parse_manifest(self.session, streamsrc).items()\n \n \n __plugin__ = AtresPlayer\n", "issue": "plugins.atresplayer: Live streams is not working.\n### Checklist\r\n\r\n- [X] This is a plugin issue and not a 
different kind of issue\r\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\r\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\r\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\r\n\r\n### Streamlink version\r\n\r\nLatest stable release\r\n\r\n### Description\r\n\r\nAs for today, Atresplayer live streams is not working.\r\n\r\n### Debug log\r\n\r\n```text\r\n[cli][debug] OS: Windows 10\r\n[cli][debug] Python: 3.10.6\r\n[cli][debug] Streamlink: 4.3.0\r\n[cli][debug] Dependencies:\r\n[cli][debug] isodate: 0.6.1\r\n[cli][debug] lxml: 4.9.1\r\n[cli][debug] pycountry: 22.3.5\r\n[cli][debug] pycryptodome: 3.15.0\r\n[cli][debug] PySocks: 1.7.1\r\n[cli][debug] requests: 2.28.1\r\n[cli][debug] websocket-client: 1.3.3\r\n[cli][debug] Arguments:\r\n[cli][debug] url=https://www.atresplayer.com/directos/antena3/\r\n[cli][debug] stream=['best']\r\n[cli][debug] --loglevel=debug\r\n[cli][debug] --hls-live-edge=1\r\n[cli][debug] --ffmpeg-ffmpeg=C:\\Program Files\\Streamlink\\ffmpeg\\ffmpeg.exe\r\n[cli][info] Found matching plugin atresplayer for URL https://www.atresplayer.com/directos/antena3/\r\nerror: Unable to validate response text: ValidationError(NoneOrAllSchema):\r\n ValidationError(type):\r\n Type of <generator object search_dict at 0x000002C64BA79930> should be list, but is generator\r\n```\r\n\n", "before_files": [{"content": "\"\"\"\n$description Spanish live TV channels from Atresmedia Television, including Antena 3 and laSexta.\n$url atresplayer.com\n$type live\n$region Spain\n\"\"\"\n\nimport logging\nimport re\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.dash import DASHStream\nfrom streamlink.stream.hls import HLSStream\nfrom streamlink.utils.data import search_dict\nfrom streamlink.utils.url import update_scheme\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r\"https?://(?:www\\.)?atresplayer\\.com/\"\n))\nclass AtresPlayer(Plugin):\n def _get_streams(self):\n self.url = update_scheme(\"https://\", self.url)\n\n api_url = self.session.http.get(self.url, schema=validate.Schema(\n re.compile(r\"\"\"window.__PRELOADED_STATE__\\s*=\\s*({.*?});\"\"\", re.DOTALL),\n validate.none_or_all(\n validate.get(1),\n validate.parse_json(),\n validate.transform(search_dict, key=\"href\"),\n [validate.url()],\n validate.get(0),\n ),\n ))\n if not api_url:\n return\n log.debug(f\"API URL: {api_url}\")\n\n player_api_url = self.session.http.get(api_url, schema=validate.Schema(\n validate.parse_json(),\n validate.transform(search_dict, key=\"urlVideo\"),\n ))\n\n stream_schema = validate.Schema(\n validate.parse_json(),\n {\n \"sources\": [\n validate.all(\n {\n \"src\": validate.url(),\n validate.optional(\"type\"): str,\n },\n ),\n ],\n },\n validate.get(\"sources\"),\n )\n\n for api_url in player_api_url:\n log.debug(f\"Player API URL: {api_url}\")\n for source in self.session.http.get(api_url, schema=stream_schema):\n log.debug(f\"Stream source: {source['src']} ({source.get('type', 'n/a')})\")\n\n if \"type\" not in source or source[\"type\"] == \"application/vnd.apple.mpegurl\":\n streams = HLSStream.parse_variant_playlist(self.session, source[\"src\"])\n if not streams:\n yield \"live\", HLSStream(self.session, 
source[\"src\"])\n else:\n yield from streams.items()\n elif source[\"type\"] == \"application/dash+xml\":\n yield from DASHStream.parse_manifest(self.session, source[\"src\"]).items()\n\n\n__plugin__ = AtresPlayer\n", "path": "src/streamlink/plugins/atresplayer.py"}]} | 1,742 | 920 |
gh_patches_debug_81 | rasdani/github-patches | git_diff | ocadotechnology__aimmo-60 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix warning about deprecation of TEMPLATE_DEBUG
When starting aimmo locally the following message is displayed:
> WARNINGS:
> ?: (1_8.W001) The standalone TEMPLATE_\* settings were deprecated in Django 1.8 and the TEMPLATES dictionary takes precedence. You must put the values of the following settings into your default TEMPLATES dict: TEMPLATE_DEBUG.
The value in question is in `aimmo/example_project/example_project/settings.py`:
`TEMPLATE_DEBUG = DEBUG`
The TEMPLATES dictionary _may be_ the one here in `aimmo/players/autoconfig.py` (?):
```
'TEMPLATES': [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
]
}
}
],
```
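For reference, Django 1.8+ expresses template debugging through the `debug` key inside a TEMPLATES entry's `OPTIONS`; when that key is omitted it defaults to the value of `DEBUG`. A hedged sketch of what carrying the old behaviour over could look like (whether the project prefers this or simply dropping the deprecated setting is a separate decision):

```python
DEBUG = True

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'APP_DIRS': True,
        'OPTIONS': {
            # Replaces the standalone TEMPLATE_DEBUG setting deprecated in Django 1.8.
            'debug': DEBUG,
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
```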
</issue>
<code>
[start of example_project/example_project/settings.py]
1 # -*- coding: utf-8 -*-
2 # Code for Life
3 #
4 # Copyright (C) 2015, Ocado Innovation Limited
5 #
6 # This program is free software: you can redistribute it and/or modify
7 # it under the terms of the GNU Affero General Public License as
8 # published by the Free Software Foundation, either version 3 of the
9 # License, or (at your option) any later version.
10 #
11 # This program is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU Affero General Public License for more details.
15 #
16 # You should have received a copy of the GNU Affero General Public License
17 # along with this program. If not, see <http://www.gnu.org/licenses/>.
18 #
19 # ADDITIONAL TERMS – Section 7 GNU General Public Licence
20 #
21 # This licence does not grant any right, title or interest in any “Ocado” logos,
22 # trade names or the trademark “Ocado” or any other trademarks or domain names
23 # owned by Ocado Innovation Limited or the Ocado group of companies or any other
24 # distinctive brand features of “Ocado” as may be secured from time to time. You
25 # must not distribute any modification of this program using the trademark
26 # “Ocado” or claim any affiliation or association with Ocado or its employees.
27 #
28 # You are not authorised to use the name Ocado (or any of its trade names) or
29 # the names of any author or contributor in advertising or for publicity purposes
30 # pertaining to the distribution of this program, without the prior written
31 # authorisation of Ocado.
32 #
33 # Any propagation, distribution or conveyance of this program must include this
34 # copyright notice and these terms. You must not misrepresent the origins of this
35 # program; modified versions of the program must be marked as such and not
36 # identified as the original program.
37 '''Django settings for example_project project.'''
38 import os
39
40 DEBUG = True
41 TEMPLATE_DEBUG = DEBUG
42
43 DATABASES = {
44 'default': {
45 'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
46 'NAME': os.path.join(os.path.abspath(os.path.dirname(__file__)),'db.sqlite3'),# Or path to database file if using sqlite3.
47 }
48 }
49
50 USE_I18N = True
51 USE_L10N = True
52
53 TIME_ZONE = 'Europe/London'
54 LANGUAGE_CODE = 'en-gb'
55 STATIC_ROOT = os.path.join(os.path.dirname(__file__), 'static')
56 STATIC_URL = '/static/'
57 SECRET_KEY = 'not-a-secret'
58
59 ROOT_URLCONF = 'django_autoconfig.autourlconf'
60
61 WSGI_APPLICATION = 'example_project.wsgi.application'
62
63 INSTALLED_APPS = (
64 'django.contrib.admin',
65 'players',
66 )
67
68 LOGGING = {
69 'version': 1,
70 'disable_existing_loggers': False,
71 'filters': {
72 'require_debug_false': {
73 '()': 'django.utils.log.RequireDebugFalse'
74 }
75 },
76 'handlers': {
77 'console': {
78 'level': 'DEBUG',
79 'class': 'logging.StreamHandler'
80 },
81 },
82 'loggers': {
83 'views': {
84 'handlers': ['console'],
85 'level': 'DEBUG'
86 },
87 }
88 }
89
90 try:
91 from example_project.local_settings import * # pylint: disable=E0611
92 except ImportError:
93 pass
94
95 from django_autoconfig import autoconfig
96 autoconfig.configure_settings(globals())
97
[end of example_project/example_project/settings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/example_project/example_project/settings.py b/example_project/example_project/settings.py
--- a/example_project/example_project/settings.py
+++ b/example_project/example_project/settings.py
@@ -38,7 +38,6 @@
import os
DEBUG = True
-TEMPLATE_DEBUG = DEBUG
DATABASES = {
'default': {
| {"golden_diff": "diff --git a/example_project/example_project/settings.py b/example_project/example_project/settings.py\n--- a/example_project/example_project/settings.py\n+++ b/example_project/example_project/settings.py\n@@ -38,7 +38,6 @@\n import os\n \n DEBUG = True\n-TEMPLATE_DEBUG = DEBUG\n \n DATABASES = {\n 'default': {\n", "issue": "Fix warning about deprecation of TEMPLATE_DEBUG\nWhen starting aimmo locally the following message is displayed:\n\n> WARNINGS:\n> ?: (1_8.W001) The standalone TEMPLATE_\\* settings were deprecated in Django 1.8 and the TEMPLATES dictionary takes precedence. You must put the values of the following settings into your default TEMPLATES dict: TEMPLATE_DEBUG.\n\nThe value in question is in `aimmo/example_project/example_project/settings.py`:\n`TEMPLATE_DEBUG = DEBUG`\n\nThe TEMPLATES dictionary _maybe_ the one here `aimmo/players/autoconfig.py` (?):\n\n```\n'TEMPLATES': [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ]\n }\n }\n ],\n```\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Code for Life\n#\n# Copyright (C) 2015, Ocado Innovation Limited\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n# ADDITIONAL TERMS \u2013 Section 7 GNU General Public Licence\n#\n# This licence does not grant any right, title or interest in any \u201cOcado\u201d logos,\n# trade names or the trademark \u201cOcado\u201d or any other trademarks or domain names\n# owned by Ocado Innovation Limited or the Ocado group of companies or any other\n# distinctive brand features of \u201cOcado\u201d as may be secured from time to time. You\n# must not distribute any modification of this program using the trademark\n# \u201cOcado\u201d or claim any affiliation or association with Ocado or its employees.\n#\n# You are not authorised to use the name Ocado (or any of its trade names) or\n# the names of any author or contributor in advertising or for publicity purposes\n# pertaining to the distribution of this program, without the prior written\n# authorisation of Ocado.\n#\n# Any propagation, distribution or conveyance of this program must include this\n# copyright notice and these terms. 
You must not misrepresent the origins of this\n# program; modified versions of the program must be marked as such and not\n# identified as the original program.\n'''Django settings for example_project project.'''\nimport os\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.\n 'NAME': os.path.join(os.path.abspath(os.path.dirname(__file__)),'db.sqlite3'),# Or path to database file if using sqlite3.\n }\n}\n\nUSE_I18N = True\nUSE_L10N = True\n\nTIME_ZONE = 'Europe/London'\nLANGUAGE_CODE = 'en-gb'\nSTATIC_ROOT = os.path.join(os.path.dirname(__file__), 'static')\nSTATIC_URL = '/static/'\nSECRET_KEY = 'not-a-secret'\n\nROOT_URLCONF = 'django_autoconfig.autourlconf'\n\nWSGI_APPLICATION = 'example_project.wsgi.application'\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'players',\n)\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler'\n },\n },\n 'loggers': {\n 'views': {\n 'handlers': ['console'],\n 'level': 'DEBUG'\n },\n }\n}\n\ntry:\n from example_project.local_settings import * # pylint: disable=E0611\nexcept ImportError:\n pass\n\nfrom django_autoconfig import autoconfig\nautoconfig.configure_settings(globals())\n", "path": "example_project/example_project/settings.py"}]} | 1,701 | 70 |
gh_patches_debug_42629 | rasdani/github-patches | git_diff | paperless-ngx__paperless-ngx-3869 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Permissions to delete Email-Rules/Accounts: disabled for a user, but user can still delete them
### Description
First, the issue description:
A user without "delete" permissions for email rules can still delete them, even when the rule was created by another user.
Second, a question:
Is it possible for a user to fully manage their own email rules/accounts without seeing the rules/accounts of other users?
### Steps to reproduce
1. Create a user (e.g. test-user) with mail-rule/mail-account add/edit/read permissions (no delete permission).
2. Create a mail-rule with another user (e.g. the admin user)
3. Login with the "test-user"
4. Go to settings -> email
5. Delete the mail-rule created by admin-user
The mail account can also be deleted without delete permissions.
### Webserver logs
```bash
No logs are visible
```
### Browser logs
_No response_
### Paperless-ngx version
1.16.5
### Host OS
Debian x86_64
### Installation method
Docker - official image
### Browser
_No response_
### Configuration changes
_No response_
### Other
_No response_
</issue>
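As a rough illustration of what the report asks for (object-level delete checks plus hiding other users' rules), a Django REST Framework viewset can be scoped and gated roughly as sketched below. This is a hedged sketch rather than the project's actual fix; the viewset name is invented, and the presence of an `owner` field on `MailRule` is an assumption.

```python
from rest_framework.permissions import IsAuthenticated, DjangoObjectPermissions
from rest_framework.viewsets import ModelViewSet

from paperless_mail.models import MailRule
from paperless_mail.serialisers import MailRuleSerializer


class OwnerScopedMailRuleViewSet(ModelViewSet):
    queryset = MailRule.objects.all().order_by("order")
    serializer_class = MailRuleSerializer
    # DjangoObjectPermissions maps DELETE/PUT to per-object delete/change checks;
    # it needs an object-permission backend such as django-guardian to be configured.
    permission_classes = (IsAuthenticated, DjangoObjectPermissions)

    def get_queryset(self):
        # Hide rules owned by other users (assumes MailRule has an "owner" field).
        return super().get_queryset().filter(owner=self.request.user)
```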
<code>
[start of src/paperless_mail/admin.py]
1 from django import forms
2 from django.contrib import admin
3 from django.utils.translation import gettext_lazy as _
4
5 from paperless_mail.models import MailAccount
6 from paperless_mail.models import MailRule
7 from paperless_mail.models import ProcessedMail
8
9
10 class MailAccountAdminForm(forms.ModelForm):
11
12 """Metadata classes used by Django admin to display the form."""
13
14 class Meta:
15
16 """Metadata class used by Django admin to display the form."""
17
18 model = MailAccount
19 widgets = {
20 "password": forms.PasswordInput(),
21 }
22 fields = [
23 "name",
24 "imap_server",
25 "username",
26 "imap_security",
27 "username",
28 "password",
29 "is_token",
30 "character_set",
31 ]
32
33
34 class MailAccountAdmin(admin.ModelAdmin):
35 list_display = ("name", "imap_server", "username")
36
37 fieldsets = [
38 (None, {"fields": ["name", "imap_server", "imap_port"]}),
39 (
40 _("Authentication"),
41 {"fields": ["imap_security", "username", "password", "is_token"]},
42 ),
43 (_("Advanced settings"), {"fields": ["character_set"]}),
44 ]
45 form = MailAccountAdminForm
46
47
48 class MailRuleAdmin(admin.ModelAdmin):
49 radio_fields = {
50 "attachment_type": admin.VERTICAL,
51 "action": admin.VERTICAL,
52 "assign_title_from": admin.VERTICAL,
53 "assign_correspondent_from": admin.VERTICAL,
54 }
55
56 fieldsets = (
57 (None, {"fields": ("name", "order", "account", "folder")}),
58 (
59 _("Filter"),
60 {
61 "description": _(
62 "Paperless will only process mails that match ALL of the "
63 "filters given below.",
64 ),
65 "fields": (
66 "filter_from",
67 "filter_to",
68 "filter_subject",
69 "filter_body",
70 "filter_attachment_filename",
71 "maximum_age",
72 "consumption_scope",
73 "attachment_type",
74 ),
75 },
76 ),
77 (
78 _("Actions"),
79 {
80 "description": _(
81 "The action applied to the mail. This action is only "
82 "performed when the mail body or attachments were "
83 "consumed from the mail.",
84 ),
85 "fields": ("action", "action_parameter"),
86 },
87 ),
88 (
89 _("Metadata"),
90 {
91 "description": _(
92 "Assign metadata to documents consumed from this rule "
93 "automatically. If you do not assign tags, types or "
94 "correspondents here, paperless will still process all "
95 "matching rules that you have defined.",
96 ),
97 "fields": (
98 "assign_title_from",
99 "assign_tags",
100 "assign_document_type",
101 "assign_correspondent_from",
102 "assign_correspondent",
103 ),
104 },
105 ),
106 )
107
108 list_filter = ("account",)
109
110 list_display = ("order", "name", "account", "folder", "action")
111
112 list_editable = ("order",)
113
114 list_display_links = ("name",)
115
116 sortable_by = []
117
118 ordering = ["order"]
119
120
121 class ProcessedMailAdmin(admin.ModelAdmin):
122 class Meta:
123 model = ProcessedMail
124 fields = "__all__"
125
126 list_display = ("subject", "status", "processed", "received", "rule")
127
128 ordering = ["-processed"]
129
130 readonly_fields = [
131 "owner",
132 "rule",
133 "folder",
134 "uid",
135 "subject",
136 "received",
137 "processed",
138 "status",
139 "error",
140 ]
141
142 list_display_links = ["subject"]
143
144 list_filter = ("status", "rule")
145
146
147 admin.site.register(MailAccount, MailAccountAdmin)
148 admin.site.register(MailRule, MailRuleAdmin)
149 admin.site.register(ProcessedMail, ProcessedMailAdmin)
150
[end of src/paperless_mail/admin.py]
[start of src/paperless_mail/serialisers.py]
1 from rest_framework import serializers
2
3 from documents.serialisers import CorrespondentField
4 from documents.serialisers import DocumentTypeField
5 from documents.serialisers import OwnedObjectSerializer
6 from documents.serialisers import TagsField
7 from paperless_mail.models import MailAccount
8 from paperless_mail.models import MailRule
9
10
11 class ObfuscatedPasswordField(serializers.Field):
12 """
13 Sends *** string instead of password in the clear
14 """
15
16 def to_representation(self, value):
17 return "*" * len(value)
18
19 def to_internal_value(self, data):
20 return data
21
22
23 class MailAccountSerializer(OwnedObjectSerializer):
24 password = ObfuscatedPasswordField()
25
26 class Meta:
27 model = MailAccount
28 depth = 1
29 fields = [
30 "id",
31 "name",
32 "imap_server",
33 "imap_port",
34 "imap_security",
35 "username",
36 "password",
37 "character_set",
38 "is_token",
39 ]
40
41 def update(self, instance, validated_data):
42 if (
43 "password" in validated_data
44 and len(validated_data.get("password").replace("*", "")) == 0
45 ):
46 validated_data.pop("password")
47 super().update(instance, validated_data)
48 return instance
49
50
51 class AccountField(serializers.PrimaryKeyRelatedField):
52 def get_queryset(self):
53 return MailAccount.objects.all().order_by("-id")
54
55
56 class MailRuleSerializer(OwnedObjectSerializer):
57 account = AccountField(required=True)
58 action_parameter = serializers.CharField(
59 allow_null=True,
60 required=False,
61 default="",
62 )
63 assign_correspondent = CorrespondentField(allow_null=True, required=False)
64 assign_tags = TagsField(many=True, allow_null=True, required=False)
65 assign_document_type = DocumentTypeField(allow_null=True, required=False)
66 order = serializers.IntegerField(required=False)
67
68 class Meta:
69 model = MailRule
70 depth = 1
71 fields = [
72 "id",
73 "name",
74 "account",
75 "folder",
76 "filter_from",
77 "filter_to",
78 "filter_subject",
79 "filter_body",
80 "filter_attachment_filename",
81 "maximum_age",
82 "action",
83 "action_parameter",
84 "assign_title_from",
85 "assign_tags",
86 "assign_correspondent_from",
87 "assign_correspondent",
88 "assign_document_type",
89 "order",
90 "attachment_type",
91 "consumption_scope",
92 ]
93
94 def update(self, instance, validated_data):
95 super().update(instance, validated_data)
96 return instance
97
98 def create(self, validated_data):
99 if "assign_tags" in validated_data:
100 assign_tags = validated_data.pop("assign_tags")
101 mail_rule = super().create(validated_data)
102 if assign_tags:
103 mail_rule.assign_tags.set(assign_tags)
104 return mail_rule
105
106 def validate(self, attrs):
107 if (
108 attrs["action"] == MailRule.MailAction.TAG
109 or attrs["action"] == MailRule.MailAction.MOVE
110 ) and attrs["action_parameter"] is None:
111 raise serializers.ValidationError("An action parameter is required.")
112
113 return attrs
114
[end of src/paperless_mail/serialisers.py]
[start of src/paperless_mail/views.py]
1 import datetime
2 import logging
3
4 from django.http import HttpResponseBadRequest
5 from rest_framework.generics import GenericAPIView
6 from rest_framework.permissions import IsAuthenticated
7 from rest_framework.response import Response
8 from rest_framework.viewsets import ModelViewSet
9
10 from documents.views import PassUserMixin
11 from paperless.views import StandardPagination
12 from paperless_mail.mail import MailError
13 from paperless_mail.mail import get_mailbox
14 from paperless_mail.mail import mailbox_login
15 from paperless_mail.models import MailAccount
16 from paperless_mail.models import MailRule
17 from paperless_mail.serialisers import MailAccountSerializer
18 from paperless_mail.serialisers import MailRuleSerializer
19
20
21 class MailAccountViewSet(ModelViewSet, PassUserMixin):
22 model = MailAccount
23
24 queryset = MailAccount.objects.all().order_by("pk")
25 serializer_class = MailAccountSerializer
26 pagination_class = StandardPagination
27 permission_classes = (IsAuthenticated,)
28
29
30 class MailRuleViewSet(ModelViewSet, PassUserMixin):
31 model = MailRule
32
33 queryset = MailRule.objects.all().order_by("order")
34 serializer_class = MailRuleSerializer
35 pagination_class = StandardPagination
36 permission_classes = (IsAuthenticated,)
37
38
39 class MailAccountTestView(GenericAPIView):
40 permission_classes = (IsAuthenticated,)
41 serializer_class = MailAccountSerializer
42
43 def post(self, request, *args, **kwargs):
44 logger = logging.getLogger("paperless_mail")
45 request.data["name"] = datetime.datetime.now().isoformat()
46 serializer = self.get_serializer(data=request.data)
47 serializer.is_valid(raise_exception=True)
48
49 # account exists, use the password from there instead of ***
50 if (
51 len(serializer.validated_data.get("password").replace("*", "")) == 0
52 and request.data["id"] is not None
53 ):
54 serializer.validated_data["password"] = MailAccount.objects.get(
55 pk=request.data["id"],
56 ).password
57
58 account = MailAccount(**serializer.validated_data)
59
60 with get_mailbox(
61 account.imap_server,
62 account.imap_port,
63 account.imap_security,
64 ) as M:
65 try:
66 mailbox_login(M, account)
67 return Response({"success": True})
68 except MailError:
69 logger.error(
70 f"Mail account {account} test failed",
71 )
72 return HttpResponseBadRequest("Unable to connect to server")
73
[end of src/paperless_mail/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/paperless_mail/admin.py b/src/paperless_mail/admin.py
--- a/src/paperless_mail/admin.py
+++ b/src/paperless_mail/admin.py
@@ -1,6 +1,7 @@
from django import forms
from django.contrib import admin
from django.utils.translation import gettext_lazy as _
+from guardian.admin import GuardedModelAdmin
from paperless_mail.models import MailAccount
from paperless_mail.models import MailRule
@@ -31,7 +32,7 @@
]
-class MailAccountAdmin(admin.ModelAdmin):
+class MailAccountAdmin(GuardedModelAdmin):
list_display = ("name", "imap_server", "username")
fieldsets = [
@@ -45,7 +46,7 @@
form = MailAccountAdminForm
-class MailRuleAdmin(admin.ModelAdmin):
+class MailRuleAdmin(GuardedModelAdmin):
radio_fields = {
"attachment_type": admin.VERTICAL,
"action": admin.VERTICAL,
diff --git a/src/paperless_mail/serialisers.py b/src/paperless_mail/serialisers.py
--- a/src/paperless_mail/serialisers.py
+++ b/src/paperless_mail/serialisers.py
@@ -25,7 +25,6 @@
class Meta:
model = MailAccount
- depth = 1
fields = [
"id",
"name",
@@ -36,6 +35,10 @@
"password",
"character_set",
"is_token",
+ "owner",
+ "user_can_change",
+ "permissions",
+ "set_permissions",
]
def update(self, instance, validated_data):
@@ -67,7 +70,6 @@
class Meta:
model = MailRule
- depth = 1
fields = [
"id",
"name",
@@ -89,6 +91,10 @@
"order",
"attachment_type",
"consumption_scope",
+ "owner",
+ "user_can_change",
+ "permissions",
+ "set_permissions",
]
def update(self, instance, validated_data):
diff --git a/src/paperless_mail/views.py b/src/paperless_mail/views.py
--- a/src/paperless_mail/views.py
+++ b/src/paperless_mail/views.py
@@ -7,6 +7,8 @@
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
+from documents.filters import ObjectOwnedOrGrantedPermissionsFilter
+from documents.permissions import PaperlessObjectPermissions
from documents.views import PassUserMixin
from paperless.views import StandardPagination
from paperless_mail.mail import MailError
@@ -24,7 +26,8 @@
queryset = MailAccount.objects.all().order_by("pk")
serializer_class = MailAccountSerializer
pagination_class = StandardPagination
- permission_classes = (IsAuthenticated,)
+ permission_classes = (IsAuthenticated, PaperlessObjectPermissions)
+ filter_backends = (ObjectOwnedOrGrantedPermissionsFilter,)
class MailRuleViewSet(ModelViewSet, PassUserMixin):
@@ -33,7 +36,8 @@
queryset = MailRule.objects.all().order_by("order")
serializer_class = MailRuleSerializer
pagination_class = StandardPagination
- permission_classes = (IsAuthenticated,)
+ permission_classes = (IsAuthenticated, PaperlessObjectPermissions)
+ filter_backends = (ObjectOwnedOrGrantedPermissionsFilter,)
class MailAccountTestView(GenericAPIView):
| {"golden_diff": "diff --git a/src/paperless_mail/admin.py b/src/paperless_mail/admin.py\n--- a/src/paperless_mail/admin.py\n+++ b/src/paperless_mail/admin.py\n@@ -1,6 +1,7 @@\n from django import forms\n from django.contrib import admin\n from django.utils.translation import gettext_lazy as _\n+from guardian.admin import GuardedModelAdmin\n \n from paperless_mail.models import MailAccount\n from paperless_mail.models import MailRule\n@@ -31,7 +32,7 @@\n ]\n \n \n-class MailAccountAdmin(admin.ModelAdmin):\n+class MailAccountAdmin(GuardedModelAdmin):\n list_display = (\"name\", \"imap_server\", \"username\")\n \n fieldsets = [\n@@ -45,7 +46,7 @@\n form = MailAccountAdminForm\n \n \n-class MailRuleAdmin(admin.ModelAdmin):\n+class MailRuleAdmin(GuardedModelAdmin):\n radio_fields = {\n \"attachment_type\": admin.VERTICAL,\n \"action\": admin.VERTICAL,\ndiff --git a/src/paperless_mail/serialisers.py b/src/paperless_mail/serialisers.py\n--- a/src/paperless_mail/serialisers.py\n+++ b/src/paperless_mail/serialisers.py\n@@ -25,7 +25,6 @@\n \n class Meta:\n model = MailAccount\n- depth = 1\n fields = [\n \"id\",\n \"name\",\n@@ -36,6 +35,10 @@\n \"password\",\n \"character_set\",\n \"is_token\",\n+ \"owner\",\n+ \"user_can_change\",\n+ \"permissions\",\n+ \"set_permissions\",\n ]\n \n def update(self, instance, validated_data):\n@@ -67,7 +70,6 @@\n \n class Meta:\n model = MailRule\n- depth = 1\n fields = [\n \"id\",\n \"name\",\n@@ -89,6 +91,10 @@\n \"order\",\n \"attachment_type\",\n \"consumption_scope\",\n+ \"owner\",\n+ \"user_can_change\",\n+ \"permissions\",\n+ \"set_permissions\",\n ]\n \n def update(self, instance, validated_data):\ndiff --git a/src/paperless_mail/views.py b/src/paperless_mail/views.py\n--- a/src/paperless_mail/views.py\n+++ b/src/paperless_mail/views.py\n@@ -7,6 +7,8 @@\n from rest_framework.response import Response\n from rest_framework.viewsets import ModelViewSet\n \n+from documents.filters import ObjectOwnedOrGrantedPermissionsFilter\n+from documents.permissions import PaperlessObjectPermissions\n from documents.views import PassUserMixin\n from paperless.views import StandardPagination\n from paperless_mail.mail import MailError\n@@ -24,7 +26,8 @@\n queryset = MailAccount.objects.all().order_by(\"pk\")\n serializer_class = MailAccountSerializer\n pagination_class = StandardPagination\n- permission_classes = (IsAuthenticated,)\n+ permission_classes = (IsAuthenticated, PaperlessObjectPermissions)\n+ filter_backends = (ObjectOwnedOrGrantedPermissionsFilter,)\n \n \n class MailRuleViewSet(ModelViewSet, PassUserMixin):\n@@ -33,7 +36,8 @@\n queryset = MailRule.objects.all().order_by(\"order\")\n serializer_class = MailRuleSerializer\n pagination_class = StandardPagination\n- permission_classes = (IsAuthenticated,)\n+ permission_classes = (IsAuthenticated, PaperlessObjectPermissions)\n+ filter_backends = (ObjectOwnedOrGrantedPermissionsFilter,)\n \n \n class MailAccountTestView(GenericAPIView):\n", "issue": "[BUG] Permissions to delete Email-Rules/Accounts: disabled for a user, but user can still delete them\n### Description\r\n\r\nFirst the issue description: \r\nA user without \"delete\"-permissions for email-rules can still delete them also when the rule was created by another user.\r\n\r\nSecond a question:\r\nIs it possible for a user to fully manage his own email-rules/accounts without seeing the rules/accounts from other users?\r\n\r\n### Steps to reproduce\r\n\r\n1. Create a user (e.g. 
test-user) with mail-rule/mail-account add/edit/read permissions (no delete permission).\r\n2. Create a mail-rule with other user (e.g. admin user)\r\n3. Login with the \"test-user\"\r\n4. Go to settings -> email \r\n5. Delete the mail-rule created by admin-user\r\n\r\nAlso the mail-account can be deleted without delete-permissions.\r\n\r\n### Webserver logs\r\n\r\n```bash\r\nNo logs are visible\r\n```\r\n\r\n\r\n### Browser logs\r\n\r\n_No response_\r\n\r\n### Paperless-ngx version\r\n\r\n1.16.5\r\n\r\n### Host OS\r\n\r\nDebian x86_64\r\n\r\n### Installation method\r\n\r\nDocker - official image\r\n\r\n### Browser\r\n\r\n_No response_\r\n\r\n### Configuration changes\r\n\r\n_No response_\r\n\r\n### Other\r\n\r\n_No response_\n", "before_files": [{"content": "from django import forms\nfrom django.contrib import admin\nfrom django.utils.translation import gettext_lazy as _\n\nfrom paperless_mail.models import MailAccount\nfrom paperless_mail.models import MailRule\nfrom paperless_mail.models import ProcessedMail\n\n\nclass MailAccountAdminForm(forms.ModelForm):\n\n \"\"\"Metadata classes used by Django admin to display the form.\"\"\"\n\n class Meta:\n\n \"\"\"Metadata class used by Django admin to display the form.\"\"\"\n\n model = MailAccount\n widgets = {\n \"password\": forms.PasswordInput(),\n }\n fields = [\n \"name\",\n \"imap_server\",\n \"username\",\n \"imap_security\",\n \"username\",\n \"password\",\n \"is_token\",\n \"character_set\",\n ]\n\n\nclass MailAccountAdmin(admin.ModelAdmin):\n list_display = (\"name\", \"imap_server\", \"username\")\n\n fieldsets = [\n (None, {\"fields\": [\"name\", \"imap_server\", \"imap_port\"]}),\n (\n _(\"Authentication\"),\n {\"fields\": [\"imap_security\", \"username\", \"password\", \"is_token\"]},\n ),\n (_(\"Advanced settings\"), {\"fields\": [\"character_set\"]}),\n ]\n form = MailAccountAdminForm\n\n\nclass MailRuleAdmin(admin.ModelAdmin):\n radio_fields = {\n \"attachment_type\": admin.VERTICAL,\n \"action\": admin.VERTICAL,\n \"assign_title_from\": admin.VERTICAL,\n \"assign_correspondent_from\": admin.VERTICAL,\n }\n\n fieldsets = (\n (None, {\"fields\": (\"name\", \"order\", \"account\", \"folder\")}),\n (\n _(\"Filter\"),\n {\n \"description\": _(\n \"Paperless will only process mails that match ALL of the \"\n \"filters given below.\",\n ),\n \"fields\": (\n \"filter_from\",\n \"filter_to\",\n \"filter_subject\",\n \"filter_body\",\n \"filter_attachment_filename\",\n \"maximum_age\",\n \"consumption_scope\",\n \"attachment_type\",\n ),\n },\n ),\n (\n _(\"Actions\"),\n {\n \"description\": _(\n \"The action applied to the mail. This action is only \"\n \"performed when the mail body or attachments were \"\n \"consumed from the mail.\",\n ),\n \"fields\": (\"action\", \"action_parameter\"),\n },\n ),\n (\n _(\"Metadata\"),\n {\n \"description\": _(\n \"Assign metadata to documents consumed from this rule \"\n \"automatically. 
If you do not assign tags, types or \"\n \"correspondents here, paperless will still process all \"\n \"matching rules that you have defined.\",\n ),\n \"fields\": (\n \"assign_title_from\",\n \"assign_tags\",\n \"assign_document_type\",\n \"assign_correspondent_from\",\n \"assign_correspondent\",\n ),\n },\n ),\n )\n\n list_filter = (\"account\",)\n\n list_display = (\"order\", \"name\", \"account\", \"folder\", \"action\")\n\n list_editable = (\"order\",)\n\n list_display_links = (\"name\",)\n\n sortable_by = []\n\n ordering = [\"order\"]\n\n\nclass ProcessedMailAdmin(admin.ModelAdmin):\n class Meta:\n model = ProcessedMail\n fields = \"__all__\"\n\n list_display = (\"subject\", \"status\", \"processed\", \"received\", \"rule\")\n\n ordering = [\"-processed\"]\n\n readonly_fields = [\n \"owner\",\n \"rule\",\n \"folder\",\n \"uid\",\n \"subject\",\n \"received\",\n \"processed\",\n \"status\",\n \"error\",\n ]\n\n list_display_links = [\"subject\"]\n\n list_filter = (\"status\", \"rule\")\n\n\nadmin.site.register(MailAccount, MailAccountAdmin)\nadmin.site.register(MailRule, MailRuleAdmin)\nadmin.site.register(ProcessedMail, ProcessedMailAdmin)\n", "path": "src/paperless_mail/admin.py"}, {"content": "from rest_framework import serializers\n\nfrom documents.serialisers import CorrespondentField\nfrom documents.serialisers import DocumentTypeField\nfrom documents.serialisers import OwnedObjectSerializer\nfrom documents.serialisers import TagsField\nfrom paperless_mail.models import MailAccount\nfrom paperless_mail.models import MailRule\n\n\nclass ObfuscatedPasswordField(serializers.Field):\n \"\"\"\n Sends *** string instead of password in the clear\n \"\"\"\n\n def to_representation(self, value):\n return \"*\" * len(value)\n\n def to_internal_value(self, data):\n return data\n\n\nclass MailAccountSerializer(OwnedObjectSerializer):\n password = ObfuscatedPasswordField()\n\n class Meta:\n model = MailAccount\n depth = 1\n fields = [\n \"id\",\n \"name\",\n \"imap_server\",\n \"imap_port\",\n \"imap_security\",\n \"username\",\n \"password\",\n \"character_set\",\n \"is_token\",\n ]\n\n def update(self, instance, validated_data):\n if (\n \"password\" in validated_data\n and len(validated_data.get(\"password\").replace(\"*\", \"\")) == 0\n ):\n validated_data.pop(\"password\")\n super().update(instance, validated_data)\n return instance\n\n\nclass AccountField(serializers.PrimaryKeyRelatedField):\n def get_queryset(self):\n return MailAccount.objects.all().order_by(\"-id\")\n\n\nclass MailRuleSerializer(OwnedObjectSerializer):\n account = AccountField(required=True)\n action_parameter = serializers.CharField(\n allow_null=True,\n required=False,\n default=\"\",\n )\n assign_correspondent = CorrespondentField(allow_null=True, required=False)\n assign_tags = TagsField(many=True, allow_null=True, required=False)\n assign_document_type = DocumentTypeField(allow_null=True, required=False)\n order = serializers.IntegerField(required=False)\n\n class Meta:\n model = MailRule\n depth = 1\n fields = [\n \"id\",\n \"name\",\n \"account\",\n \"folder\",\n \"filter_from\",\n \"filter_to\",\n \"filter_subject\",\n \"filter_body\",\n \"filter_attachment_filename\",\n \"maximum_age\",\n \"action\",\n \"action_parameter\",\n \"assign_title_from\",\n \"assign_tags\",\n \"assign_correspondent_from\",\n \"assign_correspondent\",\n \"assign_document_type\",\n \"order\",\n \"attachment_type\",\n \"consumption_scope\",\n ]\n\n def update(self, instance, validated_data):\n super().update(instance, 
validated_data)\n return instance\n\n def create(self, validated_data):\n if \"assign_tags\" in validated_data:\n assign_tags = validated_data.pop(\"assign_tags\")\n mail_rule = super().create(validated_data)\n if assign_tags:\n mail_rule.assign_tags.set(assign_tags)\n return mail_rule\n\n def validate(self, attrs):\n if (\n attrs[\"action\"] == MailRule.MailAction.TAG\n or attrs[\"action\"] == MailRule.MailAction.MOVE\n ) and attrs[\"action_parameter\"] is None:\n raise serializers.ValidationError(\"An action parameter is required.\")\n\n return attrs\n", "path": "src/paperless_mail/serialisers.py"}, {"content": "import datetime\nimport logging\n\nfrom django.http import HttpResponseBadRequest\nfrom rest_framework.generics import GenericAPIView\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import ModelViewSet\n\nfrom documents.views import PassUserMixin\nfrom paperless.views import StandardPagination\nfrom paperless_mail.mail import MailError\nfrom paperless_mail.mail import get_mailbox\nfrom paperless_mail.mail import mailbox_login\nfrom paperless_mail.models import MailAccount\nfrom paperless_mail.models import MailRule\nfrom paperless_mail.serialisers import MailAccountSerializer\nfrom paperless_mail.serialisers import MailRuleSerializer\n\n\nclass MailAccountViewSet(ModelViewSet, PassUserMixin):\n model = MailAccount\n\n queryset = MailAccount.objects.all().order_by(\"pk\")\n serializer_class = MailAccountSerializer\n pagination_class = StandardPagination\n permission_classes = (IsAuthenticated,)\n\n\nclass MailRuleViewSet(ModelViewSet, PassUserMixin):\n model = MailRule\n\n queryset = MailRule.objects.all().order_by(\"order\")\n serializer_class = MailRuleSerializer\n pagination_class = StandardPagination\n permission_classes = (IsAuthenticated,)\n\n\nclass MailAccountTestView(GenericAPIView):\n permission_classes = (IsAuthenticated,)\n serializer_class = MailAccountSerializer\n\n def post(self, request, *args, **kwargs):\n logger = logging.getLogger(\"paperless_mail\")\n request.data[\"name\"] = datetime.datetime.now().isoformat()\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n # account exists, use the password from there instead of ***\n if (\n len(serializer.validated_data.get(\"password\").replace(\"*\", \"\")) == 0\n and request.data[\"id\"] is not None\n ):\n serializer.validated_data[\"password\"] = MailAccount.objects.get(\n pk=request.data[\"id\"],\n ).password\n\n account = MailAccount(**serializer.validated_data)\n\n with get_mailbox(\n account.imap_server,\n account.imap_port,\n account.imap_security,\n ) as M:\n try:\n mailbox_login(M, account)\n return Response({\"success\": True})\n except MailError:\n logger.error(\n f\"Mail account {account} test failed\",\n )\n return HttpResponseBadRequest(\"Unable to connect to server\")\n", "path": "src/paperless_mail/views.py"}]} | 3,518 | 758 |
gh_patches_debug_23806 | rasdani/github-patches | git_diff | ipython__ipython-9861 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
tools/github_stats.py still fails on two factor auth with app
See #9179. I think #9186 fixed it only for SMS codes, not for OTPs generated from a smartphone app.
</issue>
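For context on the remark above: GitHub answers such requests with an `X-GitHub-OTP: required; <type>` header, where the type is `sms` for text-message codes and `app` for authenticator apps, so a check pinned to the literal value `required; sms` misses app-generated codes. Below is a hedged, illustrative sketch of a type-agnostic prompt; the function name is invented and it is not the repository's code, though it mirrors the flow of the `gh_api.py` shown later.

```python
import getpass
import json
import sys

import requests


def github_authorize(user, pw, auth_request):
    """Request an OAuth token, prompting for a one-time password if 2FA is enabled."""
    url = "https://api.github.com/authorizations"
    response = requests.post(url, auth=(user, pw), data=json.dumps(auth_request))
    otp_header = response.headers.get("X-GitHub-OTP", "")
    if response.status_code == 401 and otp_header.startswith("required;"):
        # Covers both "required; sms" and "required; app" (authenticator apps).
        otp = getpass.getpass("One-time password: ", stream=sys.stderr)
        response = requests.post(url, auth=(user, pw), data=json.dumps(auth_request),
                                 headers={"X-GitHub-OTP": otp})
    response.raise_for_status()
    return response.json()["token"]
```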
<code>
[start of tools/gh_api.py]
1 """Functions for Github API requests."""
2 from __future__ import print_function
3
4 try:
5 input = raw_input
6 except NameError:
7 pass
8
9 import os
10 import re
11 import sys
12
13 import requests
14 import getpass
15 import json
16
17 try:
18 import requests_cache
19 except ImportError:
20 print("cache not available, install `requests_cache` for caching.", file=sys.stderr)
21 else:
22 requests_cache.install_cache("gh_api", expire_after=3600)
23
24 # Keyring stores passwords by a 'username', but we're not storing a username and
25 # password
26 fake_username = 'ipython_tools'
27
28 class Obj(dict):
29 """Dictionary with attribute access to names."""
30 def __getattr__(self, name):
31 try:
32 return self[name]
33 except KeyError:
34 raise AttributeError(name)
35
36 def __setattr__(self, name, val):
37 self[name] = val
38
39 token = None
40 def get_auth_token():
41 global token
42
43 if token is not None:
44 return token
45
46 import keyring
47 token = keyring.get_password('github', fake_username)
48 if token is not None:
49 return token
50
51 print("Please enter your github username and password. These are not "
52 "stored, only used to get an oAuth token. You can revoke this at "
53 "any time on Github.\n"
54 "Username: ", file=sys.stderr, end='')
55 user = input('')
56 pw = getpass.getpass("Password: ", stream=sys.stderr)
57
58 auth_request = {
59 "scopes": [
60 "public_repo",
61 "gist"
62 ],
63 "note": "IPython tools",
64 "note_url": "https://github.com/ipython/ipython/tree/master/tools",
65 }
66 response = requests.post('https://api.github.com/authorizations',
67 auth=(user, pw), data=json.dumps(auth_request))
68 if response.status_code == 401 and \
69 response.headers.get('X-GitHub-OTP') == 'required; sms':
70 print("Your login API resquest a SMS one time password", file=sys.stderr)
71 sms_pw = getpass.getpass("SMS password: ", stream=sys.stderr)
72 response = requests.post('https://api.github.com/authorizations',
73 auth=(user, pw),
74 data=json.dumps(auth_request),
75 headers={'X-GitHub-OTP':sms_pw})
76 response.raise_for_status()
77 token = json.loads(response.text)['token']
78 keyring.set_password('github', fake_username, token)
79 return token
80
81 def make_auth_header():
82 return {'Authorization': 'token ' + get_auth_token()}
83
84 def post_issue_comment(project, num, body):
85 url = 'https://api.github.com/repos/{project}/issues/{num}/comments'.format(project=project, num=num)
86 payload = json.dumps({'body': body})
87 requests.post(url, data=payload, headers=make_auth_header())
88
89 def post_gist(content, description='', filename='file', auth=False):
90 """Post some text to a Gist, and return the URL."""
91 post_data = json.dumps({
92 "description": description,
93 "public": True,
94 "files": {
95 filename: {
96 "content": content
97 }
98 }
99 }).encode('utf-8')
100
101 headers = make_auth_header() if auth else {}
102 response = requests.post("https://api.github.com/gists", data=post_data, headers=headers)
103 response.raise_for_status()
104 response_data = json.loads(response.text)
105 return response_data['html_url']
106
107 def get_pull_request(project, num, auth=False):
108 """get pull request info by number
109 """
110 url = "https://api.github.com/repos/{project}/pulls/{num}".format(project=project, num=num)
111 if auth:
112 header = make_auth_header()
113 else:
114 header = None
115 print("fetching %s" % url, file=sys.stderr)
116 response = requests.get(url, headers=header)
117 response.raise_for_status()
118 return json.loads(response.text, object_hook=Obj)
119
120 def get_pull_request_files(project, num, auth=False):
121 """get list of files in a pull request"""
122 url = "https://api.github.com/repos/{project}/pulls/{num}/files".format(project=project, num=num)
123 if auth:
124 header = make_auth_header()
125 else:
126 header = None
127 return get_paged_request(url, headers=header)
128
129 element_pat = re.compile(r'<(.+?)>')
130 rel_pat = re.compile(r'rel=[\'"](\w+)[\'"]')
131
132 def get_paged_request(url, headers=None, **params):
133 """get a full list, handling APIv3's paging"""
134 results = []
135 params.setdefault("per_page", 100)
136 while True:
137 if '?' in url:
138 params = None
139 print("fetching %s" % url, file=sys.stderr)
140 else:
141 print("fetching %s with %s" % (url, params), file=sys.stderr)
142 response = requests.get(url, headers=headers, params=params)
143 response.raise_for_status()
144 results.extend(response.json())
145 if 'next' in response.links:
146 url = response.links['next']['url']
147 else:
148 break
149 return results
150
151 def get_pulls_list(project, auth=False, **params):
152 """get pull request list"""
153 params.setdefault("state", "closed")
154 url = "https://api.github.com/repos/{project}/pulls".format(project=project)
155 if auth:
156 headers = make_auth_header()
157 else:
158 headers = None
159 pages = get_paged_request(url, headers=headers, **params)
160 return pages
161
162 def get_issues_list(project, auth=False, **params):
163 """get issues list"""
164 params.setdefault("state", "closed")
165 url = "https://api.github.com/repos/{project}/issues".format(project=project)
166 if auth:
167 headers = make_auth_header()
168 else:
169 headers = None
170 pages = get_paged_request(url, headers=headers, **params)
171 return pages
172
173 def get_milestones(project, auth=False, **params):
174 params.setdefault('state', 'all')
175 url = "https://api.github.com/repos/{project}/milestones".format(project=project)
176 if auth:
177 headers = make_auth_header()
178 else:
179 headers = None
180 milestones = get_paged_request(url, headers=headers, **params)
181 return milestones
182
183 def get_milestone_id(project, milestone, auth=False, **params):
184 milestones = get_milestones(project, auth=auth, **params)
185 for mstone in milestones:
186 if mstone['title'] == milestone:
187 return mstone['number']
188 else:
189 raise ValueError("milestone %s not found" % milestone)
190
191 def is_pull_request(issue):
192 """Return True if the given issue is a pull request."""
193 return bool(issue.get('pull_request', {}).get('html_url', None))
194
195 def get_authors(pr):
196 print("getting authors for #%i" % pr['number'], file=sys.stderr)
197 h = make_auth_header()
198 r = requests.get(pr['commits_url'], headers=h)
199 r.raise_for_status()
200 commits = r.json()
201 authors = []
202 for commit in commits:
203 author = commit['commit']['author']
204 authors.append("%s <%s>" % (author['name'], author['email']))
205 return authors
206
207 # encode_multipart_formdata is from urllib3.filepost
208 # The only change is to iter_fields, to enforce S3's required key ordering
209
210 def iter_fields(fields):
211 fields = fields.copy()
212 for key in ('key', 'acl', 'Filename', 'success_action_status', 'AWSAccessKeyId',
213 'Policy', 'Signature', 'Content-Type', 'file'):
214 yield (key, fields.pop(key))
215 for (k,v) in fields.items():
216 yield k,v
217
218 def encode_multipart_formdata(fields, boundary=None):
219 """
220 Encode a dictionary of ``fields`` using the multipart/form-data mime format.
221
222 :param fields:
223 Dictionary of fields or list of (key, value) field tuples. The key is
224 treated as the field name, and the value as the body of the form-data
225 bytes. If the value is a tuple of two elements, then the first element
226 is treated as the filename of the form-data section.
227
228 Field names and filenames must be unicode.
229
230 :param boundary:
231 If not specified, then a random boundary will be generated using
232 :func:`mimetools.choose_boundary`.
233 """
234 # copy requests imports in here:
235 from io import BytesIO
236 from requests.packages.urllib3.filepost import (
237 choose_boundary, six, writer, b, get_content_type
238 )
239 body = BytesIO()
240 if boundary is None:
241 boundary = choose_boundary()
242
243 for fieldname, value in iter_fields(fields):
244 body.write(b('--%s\r\n' % (boundary)))
245
246 if isinstance(value, tuple):
247 filename, data = value
248 writer(body).write('Content-Disposition: form-data; name="%s"; '
249 'filename="%s"\r\n' % (fieldname, filename))
250 body.write(b('Content-Type: %s\r\n\r\n' %
251 (get_content_type(filename))))
252 else:
253 data = value
254 writer(body).write('Content-Disposition: form-data; name="%s"\r\n'
255 % (fieldname))
256 body.write(b'Content-Type: text/plain\r\n\r\n')
257
258 if isinstance(data, int):
259 data = str(data) # Backwards compatibility
260 if isinstance(data, six.text_type):
261 writer(body).write(data)
262 else:
263 body.write(data)
264
265 body.write(b'\r\n')
266
267 body.write(b('--%s--\r\n' % (boundary)))
268
269 content_type = b('multipart/form-data; boundary=%s' % boundary)
270
271 return body.getvalue(), content_type
272
273
274 def post_download(project, filename, name=None, description=""):
275 """Upload a file to the GitHub downloads area"""
276 if name is None:
277 name = os.path.basename(filename)
278 with open(filename, 'rb') as f:
279 filedata = f.read()
280
281 url = "https://api.github.com/repos/{project}/downloads".format(project=project)
282
283 payload = json.dumps(dict(name=name, size=len(filedata),
284 description=description))
285 response = requests.post(url, data=payload, headers=make_auth_header())
286 response.raise_for_status()
287 reply = json.loads(response.content)
288 s3_url = reply['s3_url']
289
290 fields = dict(
291 key=reply['path'],
292 acl=reply['acl'],
293 success_action_status=201,
294 Filename=reply['name'],
295 AWSAccessKeyId=reply['accesskeyid'],
296 Policy=reply['policy'],
297 Signature=reply['signature'],
298 file=(reply['name'], filedata),
299 )
300 fields['Content-Type'] = reply['mime_type']
301 data, content_type = encode_multipart_formdata(fields)
302 s3r = requests.post(s3_url, data=data, headers={'Content-Type': content_type})
303 return s3r
304
[end of tools/gh_api.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/tools/gh_api.py b/tools/gh_api.py
--- a/tools/gh_api.py
+++ b/tools/gh_api.py
@@ -66,13 +66,13 @@
response = requests.post('https://api.github.com/authorizations',
auth=(user, pw), data=json.dumps(auth_request))
if response.status_code == 401 and \
- response.headers.get('X-GitHub-OTP') == 'required; sms':
- print("Your login API resquest a SMS one time password", file=sys.stderr)
- sms_pw = getpass.getpass("SMS password: ", stream=sys.stderr)
+ 'required;' in response.headers.get('X-GitHub-OTP', ''):
+ print("Your login API requested a one time password", file=sys.stderr)
+ otp = getpass.getpass("One Time Password: ", stream=sys.stderr)
response = requests.post('https://api.github.com/authorizations',
auth=(user, pw),
data=json.dumps(auth_request),
- headers={'X-GitHub-OTP':sms_pw})
+ headers={'X-GitHub-OTP':otp})
response.raise_for_status()
token = json.loads(response.text)['token']
keyring.set_password('github', fake_username, token)
| {"golden_diff": "diff --git a/tools/gh_api.py b/tools/gh_api.py\n--- a/tools/gh_api.py\n+++ b/tools/gh_api.py\n@@ -66,13 +66,13 @@\n response = requests.post('https://api.github.com/authorizations',\n auth=(user, pw), data=json.dumps(auth_request))\n if response.status_code == 401 and \\\n- response.headers.get('X-GitHub-OTP') == 'required; sms':\n- print(\"Your login API resquest a SMS one time password\", file=sys.stderr)\n- sms_pw = getpass.getpass(\"SMS password: \", stream=sys.stderr)\n+ 'required;' in response.headers.get('X-GitHub-OTP', ''):\n+ print(\"Your login API requested a one time password\", file=sys.stderr)\n+ otp = getpass.getpass(\"One Time Password: \", stream=sys.stderr)\n response = requests.post('https://api.github.com/authorizations',\n auth=(user, pw), \n data=json.dumps(auth_request),\n- headers={'X-GitHub-OTP':sms_pw})\n+ headers={'X-GitHub-OTP':otp})\n response.raise_for_status()\n token = json.loads(response.text)['token']\n keyring.set_password('github', fake_username, token)\n", "issue": "tools/github_stats.py still fails on two factor auth with app\nSee #9179. I think #9186 fixed it only for SMS codes, not for OTPs generated from a smartphone app.\n\n", "before_files": [{"content": "\"\"\"Functions for Github API requests.\"\"\"\nfrom __future__ import print_function\n\ntry:\n input = raw_input\nexcept NameError:\n pass\n\nimport os\nimport re\nimport sys\n\nimport requests\nimport getpass\nimport json\n\ntry:\n import requests_cache\nexcept ImportError:\n print(\"cache not available, install `requests_cache` for caching.\", file=sys.stderr)\nelse:\n requests_cache.install_cache(\"gh_api\", expire_after=3600)\n\n# Keyring stores passwords by a 'username', but we're not storing a username and\n# password\nfake_username = 'ipython_tools'\n\nclass Obj(dict):\n \"\"\"Dictionary with attribute access to names.\"\"\"\n def __getattr__(self, name):\n try:\n return self[name]\n except KeyError:\n raise AttributeError(name)\n\n def __setattr__(self, name, val):\n self[name] = val\n\ntoken = None\ndef get_auth_token():\n global token\n\n if token is not None:\n return token\n\n import keyring\n token = keyring.get_password('github', fake_username)\n if token is not None:\n return token\n\n print(\"Please enter your github username and password. These are not \"\n \"stored, only used to get an oAuth token. 
You can revoke this at \"\n \"any time on Github.\\n\"\n \"Username: \", file=sys.stderr, end='')\n user = input('')\n pw = getpass.getpass(\"Password: \", stream=sys.stderr)\n\n auth_request = {\n \"scopes\": [\n \"public_repo\",\n \"gist\"\n ],\n \"note\": \"IPython tools\",\n \"note_url\": \"https://github.com/ipython/ipython/tree/master/tools\",\n }\n response = requests.post('https://api.github.com/authorizations',\n auth=(user, pw), data=json.dumps(auth_request))\n if response.status_code == 401 and \\\n response.headers.get('X-GitHub-OTP') == 'required; sms':\n print(\"Your login API resquest a SMS one time password\", file=sys.stderr)\n sms_pw = getpass.getpass(\"SMS password: \", stream=sys.stderr)\n response = requests.post('https://api.github.com/authorizations',\n auth=(user, pw), \n data=json.dumps(auth_request),\n headers={'X-GitHub-OTP':sms_pw})\n response.raise_for_status()\n token = json.loads(response.text)['token']\n keyring.set_password('github', fake_username, token)\n return token\n\ndef make_auth_header():\n return {'Authorization': 'token ' + get_auth_token()}\n\ndef post_issue_comment(project, num, body):\n url = 'https://api.github.com/repos/{project}/issues/{num}/comments'.format(project=project, num=num)\n payload = json.dumps({'body': body})\n requests.post(url, data=payload, headers=make_auth_header())\n\ndef post_gist(content, description='', filename='file', auth=False):\n \"\"\"Post some text to a Gist, and return the URL.\"\"\"\n post_data = json.dumps({\n \"description\": description,\n \"public\": True,\n \"files\": {\n filename: {\n \"content\": content\n }\n }\n }).encode('utf-8')\n\n headers = make_auth_header() if auth else {}\n response = requests.post(\"https://api.github.com/gists\", data=post_data, headers=headers)\n response.raise_for_status()\n response_data = json.loads(response.text)\n return response_data['html_url']\n\ndef get_pull_request(project, num, auth=False):\n \"\"\"get pull request info by number\n \"\"\"\n url = \"https://api.github.com/repos/{project}/pulls/{num}\".format(project=project, num=num)\n if auth:\n header = make_auth_header()\n else:\n header = None\n print(\"fetching %s\" % url, file=sys.stderr)\n response = requests.get(url, headers=header)\n response.raise_for_status()\n return json.loads(response.text, object_hook=Obj)\n\ndef get_pull_request_files(project, num, auth=False):\n \"\"\"get list of files in a pull request\"\"\"\n url = \"https://api.github.com/repos/{project}/pulls/{num}/files\".format(project=project, num=num)\n if auth:\n header = make_auth_header()\n else:\n header = None\n return get_paged_request(url, headers=header)\n\nelement_pat = re.compile(r'<(.+?)>')\nrel_pat = re.compile(r'rel=[\\'\"](\\w+)[\\'\"]')\n\ndef get_paged_request(url, headers=None, **params):\n \"\"\"get a full list, handling APIv3's paging\"\"\"\n results = []\n params.setdefault(\"per_page\", 100)\n while True:\n if '?' 
in url:\n params = None\n print(\"fetching %s\" % url, file=sys.stderr)\n else:\n print(\"fetching %s with %s\" % (url, params), file=sys.stderr)\n response = requests.get(url, headers=headers, params=params)\n response.raise_for_status()\n results.extend(response.json())\n if 'next' in response.links:\n url = response.links['next']['url']\n else:\n break\n return results\n\ndef get_pulls_list(project, auth=False, **params):\n \"\"\"get pull request list\"\"\"\n params.setdefault(\"state\", \"closed\")\n url = \"https://api.github.com/repos/{project}/pulls\".format(project=project)\n if auth:\n headers = make_auth_header()\n else:\n headers = None\n pages = get_paged_request(url, headers=headers, **params)\n return pages\n\ndef get_issues_list(project, auth=False, **params):\n \"\"\"get issues list\"\"\"\n params.setdefault(\"state\", \"closed\")\n url = \"https://api.github.com/repos/{project}/issues\".format(project=project)\n if auth:\n headers = make_auth_header()\n else:\n headers = None\n pages = get_paged_request(url, headers=headers, **params)\n return pages\n\ndef get_milestones(project, auth=False, **params):\n params.setdefault('state', 'all')\n url = \"https://api.github.com/repos/{project}/milestones\".format(project=project)\n if auth:\n headers = make_auth_header()\n else:\n headers = None\n milestones = get_paged_request(url, headers=headers, **params)\n return milestones\n\ndef get_milestone_id(project, milestone, auth=False, **params):\n milestones = get_milestones(project, auth=auth, **params)\n for mstone in milestones:\n if mstone['title'] == milestone:\n return mstone['number']\n else:\n raise ValueError(\"milestone %s not found\" % milestone)\n\ndef is_pull_request(issue):\n \"\"\"Return True if the given issue is a pull request.\"\"\"\n return bool(issue.get('pull_request', {}).get('html_url', None))\n\ndef get_authors(pr):\n print(\"getting authors for #%i\" % pr['number'], file=sys.stderr)\n h = make_auth_header()\n r = requests.get(pr['commits_url'], headers=h)\n r.raise_for_status()\n commits = r.json()\n authors = []\n for commit in commits:\n author = commit['commit']['author']\n authors.append(\"%s <%s>\" % (author['name'], author['email']))\n return authors\n\n# encode_multipart_formdata is from urllib3.filepost\n# The only change is to iter_fields, to enforce S3's required key ordering\n\ndef iter_fields(fields):\n fields = fields.copy()\n for key in ('key', 'acl', 'Filename', 'success_action_status', 'AWSAccessKeyId',\n 'Policy', 'Signature', 'Content-Type', 'file'):\n yield (key, fields.pop(key))\n for (k,v) in fields.items():\n yield k,v\n\ndef encode_multipart_formdata(fields, boundary=None):\n \"\"\"\n Encode a dictionary of ``fields`` using the multipart/form-data mime format.\n\n :param fields:\n Dictionary of fields or list of (key, value) field tuples. The key is\n treated as the field name, and the value as the body of the form-data\n bytes. 
If the value is a tuple of two elements, then the first element\n is treated as the filename of the form-data section.\n\n Field names and filenames must be unicode.\n\n :param boundary:\n If not specified, then a random boundary will be generated using\n :func:`mimetools.choose_boundary`.\n \"\"\"\n # copy requests imports in here:\n from io import BytesIO\n from requests.packages.urllib3.filepost import (\n choose_boundary, six, writer, b, get_content_type\n )\n body = BytesIO()\n if boundary is None:\n boundary = choose_boundary()\n\n for fieldname, value in iter_fields(fields):\n body.write(b('--%s\\r\\n' % (boundary)))\n\n if isinstance(value, tuple):\n filename, data = value\n writer(body).write('Content-Disposition: form-data; name=\"%s\"; '\n 'filename=\"%s\"\\r\\n' % (fieldname, filename))\n body.write(b('Content-Type: %s\\r\\n\\r\\n' %\n (get_content_type(filename))))\n else:\n data = value\n writer(body).write('Content-Disposition: form-data; name=\"%s\"\\r\\n'\n % (fieldname))\n body.write(b'Content-Type: text/plain\\r\\n\\r\\n')\n\n if isinstance(data, int):\n data = str(data) # Backwards compatibility\n if isinstance(data, six.text_type):\n writer(body).write(data)\n else:\n body.write(data)\n\n body.write(b'\\r\\n')\n\n body.write(b('--%s--\\r\\n' % (boundary)))\n\n content_type = b('multipart/form-data; boundary=%s' % boundary)\n\n return body.getvalue(), content_type\n\n\ndef post_download(project, filename, name=None, description=\"\"):\n \"\"\"Upload a file to the GitHub downloads area\"\"\"\n if name is None:\n name = os.path.basename(filename)\n with open(filename, 'rb') as f:\n filedata = f.read()\n\n url = \"https://api.github.com/repos/{project}/downloads\".format(project=project)\n\n payload = json.dumps(dict(name=name, size=len(filedata),\n description=description))\n response = requests.post(url, data=payload, headers=make_auth_header())\n response.raise_for_status()\n reply = json.loads(response.content)\n s3_url = reply['s3_url']\n\n fields = dict(\n key=reply['path'],\n acl=reply['acl'],\n success_action_status=201,\n Filename=reply['name'],\n AWSAccessKeyId=reply['accesskeyid'],\n Policy=reply['policy'],\n Signature=reply['signature'],\n file=(reply['name'], filedata),\n )\n fields['Content-Type'] = reply['mime_type']\n data, content_type = encode_multipart_formdata(fields)\n s3r = requests.post(s3_url, data=data, headers={'Content-Type': content_type})\n return s3r\n", "path": "tools/gh_api.py"}]} | 3,805 | 282 |
gh_patches_debug_38408 | rasdani/github-patches | git_diff | great-expectations__great_expectations-5077 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use cleaner solution for non-truncating division in python 2
Prefer `from __future__ import division` to `1.*x/y`
</issue>
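To make the stated preference concrete, here is a minimal sketch (illustrative only, not repository code; the function name is invented):

```python
from __future__ import division  # must appear at the top of the module

def fraction(x, y):
    # True division on Python 2 and 3 alike, without the 1. * x / y workaround.
    return x / y

print(fraction(1, 3))  # 0.3333..., not 0, even on Python 2
```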
<code>
[start of great_expectations/rule_based_profiler/data_assistant/data_assistant_dispatcher.py]
1 import logging
2 from typing import Dict, Optional, Type
3
4 from great_expectations.rule_based_profiler.data_assistant import DataAssistant
5 from great_expectations.rule_based_profiler.data_assistant.data_assistant_runner import (
6 DataAssistantRunner,
7 )
8
9 logger = logging.getLogger(__name__)
10
11
12 class DataAssistantDispatcher:
13 """
14 DataAssistantDispatcher intercepts requests for "DataAssistant" classes by their registered names and manages their
15 associated "DataAssistantRunner" objects, which process invocations of calls to "DataAssistant" "run()" methods.
16 """
17
18 _registered_data_assistants: Dict[str, Type[DataAssistant]] = {}
19
20 def __init__(self, data_context: "BaseDataContext") -> None: # noqa: F821
21 """
22 Args:
23 data_context: BaseDataContext associated with DataAssistantDispatcher
24 """
25 self._data_context = data_context
26
27 self._data_assistant_runner_cache = {}
28
29 def __getattr__(self, name: str) -> DataAssistantRunner:
30 # Both, registered data_assistant_type and alias name are supported for invocation.
31
32 # _registered_data_assistants has both aliases and full names
33 data_assistant_cls: Optional[
34 Type[DataAssistant]
35 ] = DataAssistantDispatcher.get_data_assistant_impl(name=name)
36
37 # If "DataAssistant" is not registered, then raise "AttributeError", which is appropriate for "__getattr__()".
38 if data_assistant_cls is None:
39 raise AttributeError(
40 f'"{type(self).__name__}" object has no attribute "{name}".'
41 )
42
43 data_assistant_name: str = data_assistant_cls.data_assistant_type
44 data_assistant_runner: Optional[
45 DataAssistantRunner
46 ] = self._data_assistant_runner_cache.get(data_assistant_name)
47 if data_assistant_runner is None:
48 data_assistant_runner = DataAssistantRunner(
49 data_assistant_cls=data_assistant_cls,
50 data_context=self._data_context,
51 )
52 self._data_assistant_runner_cache[
53 data_assistant_name
54 ] = data_assistant_runner
55
56 return data_assistant_runner
57
58 @classmethod
59 def register_data_assistant(
60 cls,
61 data_assistant: Type[DataAssistant], # noqa: F821
62 ) -> None:
63 """
64 This method executes "run()" of effective "RuleBasedProfiler" and fills "DataAssistantResult" object with outputs.
65
66 Args:
67 data_assistant: "DataAssistant" class to be registered
68 """
69 data_assistant_type = data_assistant.data_assistant_type
70 cls._register(data_assistant_type, data_assistant)
71
72 alias: Optional[str] = data_assistant.__alias__
73 if alias is not None:
74 cls._register(alias, data_assistant)
75
76 @classmethod
77 def _register(cls, name: str, data_assistant: Type[DataAssistant]) -> None:
78 registered_data_assistants = cls._registered_data_assistants
79
80 if name in registered_data_assistants:
81 raise ValueError(f'Existing declarations of DataAssistant "{name}" found.')
82
83 logger.debug(
84 f'Registering the declaration of DataAssistant "{name}" took place.'
85 )
86 registered_data_assistants[name] = data_assistant
87
88 @classmethod
89 def get_data_assistant_impl(
90 cls,
91 name: Optional[str],
92 ) -> Optional[Type[DataAssistant]]: # noqa: F821
93 """
94 This method obtains (previously registered) "DataAssistant" class from DataAssistant Registry.
95
96 Note that it will clean the input string before checking against registered assistants.
97
98 Args:
99 data_assistant_type: String representing "snake case" version of "DataAssistant" class type
100
101 Returns:
102 Class inheriting "DataAssistant" if found; otherwise, None
103 """
104 if name is None:
105 return None
106 name = name.lower()
107 return cls._registered_data_assistants.get(name)
108
[end of great_expectations/rule_based_profiler/data_assistant/data_assistant_dispatcher.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/great_expectations/rule_based_profiler/data_assistant/data_assistant_dispatcher.py b/great_expectations/rule_based_profiler/data_assistant/data_assistant_dispatcher.py
--- a/great_expectations/rule_based_profiler/data_assistant/data_assistant_dispatcher.py
+++ b/great_expectations/rule_based_profiler/data_assistant/data_assistant_dispatcher.py
@@ -1,5 +1,5 @@
import logging
-from typing import Dict, Optional, Type
+from typing import Dict, Optional, Set, Type
from great_expectations.rule_based_profiler.data_assistant import DataAssistant
from great_expectations.rule_based_profiler.data_assistant.data_assistant_runner import (
@@ -61,7 +61,7 @@
data_assistant: Type[DataAssistant], # noqa: F821
) -> None:
"""
- This method executes "run()" of effective "RuleBasedProfiler" and fills "DataAssistantResult" object with outputs.
+ This method registers "DataAssistant" subclass for future instantiation and execution of its "run()" method.
Args:
data_assistant: "DataAssistant" class to be registered
@@ -89,14 +89,14 @@
def get_data_assistant_impl(
cls,
name: Optional[str],
- ) -> Optional[Type[DataAssistant]]: # noqa: F821
+ ) -> Optional[Type[DataAssistant]]:
"""
This method obtains (previously registered) "DataAssistant" class from DataAssistant Registry.
Note that it will clean the input string before checking against registered assistants.
Args:
- data_assistant_type: String representing "snake case" version of "DataAssistant" class type
+ name: String representing "snake case" version of "DataAssistant" class type
Returns:
Class inheriting "DataAssistant" if found; otherwise, None
@@ -105,3 +105,23 @@
return None
name = name.lower()
return cls._registered_data_assistants.get(name)
+
+ def __dir__(self):
+ """
+ This custom magic method is used to enable tab completion on "DataAssistantDispatcher" objects.
+ """
+ data_assistant_dispatcher_attrs: Set[str] = set(super().__dir__())
+ data_assistant_registered_names: Set[
+ str
+ ] = get_registered_data_assistant_names()
+ combined_dir_attrs: Set[str] = (
+ data_assistant_dispatcher_attrs | data_assistant_registered_names
+ )
+ return list(combined_dir_attrs)
+
+
+def get_registered_data_assistant_names() -> Set[str]:
+ """
+ This method returns names (registered data_assistant_type and alias name) of registered "DataAssistant" classes.
+ """
+ return set(DataAssistantDispatcher._registered_data_assistants.keys())
| {"golden_diff": "diff --git a/great_expectations/rule_based_profiler/data_assistant/data_assistant_dispatcher.py b/great_expectations/rule_based_profiler/data_assistant/data_assistant_dispatcher.py\n--- a/great_expectations/rule_based_profiler/data_assistant/data_assistant_dispatcher.py\n+++ b/great_expectations/rule_based_profiler/data_assistant/data_assistant_dispatcher.py\n@@ -1,5 +1,5 @@\n import logging\n-from typing import Dict, Optional, Type\n+from typing import Dict, Optional, Set, Type\n \n from great_expectations.rule_based_profiler.data_assistant import DataAssistant\n from great_expectations.rule_based_profiler.data_assistant.data_assistant_runner import (\n@@ -61,7 +61,7 @@\n data_assistant: Type[DataAssistant], # noqa: F821\n ) -> None:\n \"\"\"\n- This method executes \"run()\" of effective \"RuleBasedProfiler\" and fills \"DataAssistantResult\" object with outputs.\n+ This method registers \"DataAssistant\" subclass for future instantiation and execution of its \"run()\" method.\n \n Args:\n data_assistant: \"DataAssistant\" class to be registered\n@@ -89,14 +89,14 @@\n def get_data_assistant_impl(\n cls,\n name: Optional[str],\n- ) -> Optional[Type[DataAssistant]]: # noqa: F821\n+ ) -> Optional[Type[DataAssistant]]:\n \"\"\"\n This method obtains (previously registered) \"DataAssistant\" class from DataAssistant Registry.\n \n Note that it will clean the input string before checking against registered assistants.\n \n Args:\n- data_assistant_type: String representing \"snake case\" version of \"DataAssistant\" class type\n+ name: String representing \"snake case\" version of \"DataAssistant\" class type\n \n Returns:\n Class inheriting \"DataAssistant\" if found; otherwise, None\n@@ -105,3 +105,23 @@\n return None\n name = name.lower()\n return cls._registered_data_assistants.get(name)\n+\n+ def __dir__(self):\n+ \"\"\"\n+ This custom magic method is used to enable tab completion on \"DataAssistantDispatcher\" objects.\n+ \"\"\"\n+ data_assistant_dispatcher_attrs: Set[str] = set(super().__dir__())\n+ data_assistant_registered_names: Set[\n+ str\n+ ] = get_registered_data_assistant_names()\n+ combined_dir_attrs: Set[str] = (\n+ data_assistant_dispatcher_attrs | data_assistant_registered_names\n+ )\n+ return list(combined_dir_attrs)\n+\n+\n+def get_registered_data_assistant_names() -> Set[str]:\n+ \"\"\"\n+ This method returns names (registered data_assistant_type and alias name) of registered \"DataAssistant\" classes.\n+ \"\"\"\n+ return set(DataAssistantDispatcher._registered_data_assistants.keys())\n", "issue": "Use cleaner solution for non-truncating division in python 2\nPrefer `from __future__ import division` to `1.*x/y`\n", "before_files": [{"content": "import logging\nfrom typing import Dict, Optional, Type\n\nfrom great_expectations.rule_based_profiler.data_assistant import DataAssistant\nfrom great_expectations.rule_based_profiler.data_assistant.data_assistant_runner import (\n DataAssistantRunner,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nclass DataAssistantDispatcher:\n \"\"\"\n DataAssistantDispatcher intercepts requests for \"DataAssistant\" classes by their registered names and manages their\n associated \"DataAssistantRunner\" objects, which process invocations of calls to \"DataAssistant\" \"run()\" methods.\n \"\"\"\n\n _registered_data_assistants: Dict[str, Type[DataAssistant]] = {}\n\n def __init__(self, data_context: \"BaseDataContext\") -> None: # noqa: F821\n \"\"\"\n Args:\n data_context: BaseDataContext associated with 
DataAssistantDispatcher\n \"\"\"\n self._data_context = data_context\n\n self._data_assistant_runner_cache = {}\n\n def __getattr__(self, name: str) -> DataAssistantRunner:\n # Both, registered data_assistant_type and alias name are supported for invocation.\n\n # _registered_data_assistants has both aliases and full names\n data_assistant_cls: Optional[\n Type[DataAssistant]\n ] = DataAssistantDispatcher.get_data_assistant_impl(name=name)\n\n # If \"DataAssistant\" is not registered, then raise \"AttributeError\", which is appropriate for \"__getattr__()\".\n if data_assistant_cls is None:\n raise AttributeError(\n f'\"{type(self).__name__}\" object has no attribute \"{name}\".'\n )\n\n data_assistant_name: str = data_assistant_cls.data_assistant_type\n data_assistant_runner: Optional[\n DataAssistantRunner\n ] = self._data_assistant_runner_cache.get(data_assistant_name)\n if data_assistant_runner is None:\n data_assistant_runner = DataAssistantRunner(\n data_assistant_cls=data_assistant_cls,\n data_context=self._data_context,\n )\n self._data_assistant_runner_cache[\n data_assistant_name\n ] = data_assistant_runner\n\n return data_assistant_runner\n\n @classmethod\n def register_data_assistant(\n cls,\n data_assistant: Type[DataAssistant], # noqa: F821\n ) -> None:\n \"\"\"\n This method executes \"run()\" of effective \"RuleBasedProfiler\" and fills \"DataAssistantResult\" object with outputs.\n\n Args:\n data_assistant: \"DataAssistant\" class to be registered\n \"\"\"\n data_assistant_type = data_assistant.data_assistant_type\n cls._register(data_assistant_type, data_assistant)\n\n alias: Optional[str] = data_assistant.__alias__\n if alias is not None:\n cls._register(alias, data_assistant)\n\n @classmethod\n def _register(cls, name: str, data_assistant: Type[DataAssistant]) -> None:\n registered_data_assistants = cls._registered_data_assistants\n\n if name in registered_data_assistants:\n raise ValueError(f'Existing declarations of DataAssistant \"{name}\" found.')\n\n logger.debug(\n f'Registering the declaration of DataAssistant \"{name}\" took place.'\n )\n registered_data_assistants[name] = data_assistant\n\n @classmethod\n def get_data_assistant_impl(\n cls,\n name: Optional[str],\n ) -> Optional[Type[DataAssistant]]: # noqa: F821\n \"\"\"\n This method obtains (previously registered) \"DataAssistant\" class from DataAssistant Registry.\n\n Note that it will clean the input string before checking against registered assistants.\n\n Args:\n data_assistant_type: String representing \"snake case\" version of \"DataAssistant\" class type\n\n Returns:\n Class inheriting \"DataAssistant\" if found; otherwise, None\n \"\"\"\n if name is None:\n return None\n name = name.lower()\n return cls._registered_data_assistants.get(name)\n", "path": "great_expectations/rule_based_profiler/data_assistant/data_assistant_dispatcher.py"}]} | 1,661 | 625 |
gh_patches_debug_19029 | rasdani/github-patches | git_diff | pantsbuild__pants-11713 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Filename too long when invoking PEX with --pex-verbosity >=3
Any PEX-using run of Pants at 44017419d (PEX 2.1.33) with `--pex-verbosity=3` (i.e. `./pants test --pex-verbosity=3 src/python/pants/util/::`) fails reproducibly with a line like:
```
/Users/stuhood/.cache/pants/named_caches/pex_root/venvs/34bbe9579368d3ee50226ae26ab367a6fe827866/800ae8ef047bd783543e8282c22bfdbee7b7fca8/pex: File name too long
```
Notably: `--pex-verbosity=1` does not trigger the issue.
Capturing the sandbox of the run using `--no-process-execution-cleanup-local-dirs` allows for reproducing the issue with the `__run.sh` script (have captured a repro). The complete log of `./__run.sh` is attached.
[ftl.log](https://github.com/pantsbuild/pants/files/6127740/ftl.log)
----
The content of the `pants_plugins.pex_pex_shim.sh` file (attached as [pants_plugins.pex_pex_shim.sh.txt](https://github.com/pantsbuild/pants/files/6127756/pants_plugins.pex_pex_shim.sh.txt)) seems to indicate that some of the verbose output gets accidentally embedded in the generated shim script.
</issue>
<code>
[start of src/python/pants/backend/python/util_rules/pex_cli.py]
1 # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 import dataclasses
5 import os
6 from dataclasses import dataclass
7 from pathlib import Path
8 from typing import Iterable, List, Mapping, Optional, Tuple
9
10 from pants.backend.python.subsystems.python_native_code import PythonNativeCode
11 from pants.backend.python.util_rules import pex_environment
12 from pants.backend.python.util_rules.pex_environment import (
13 PexEnvironment,
14 PexRuntimeEnvironment,
15 PythonExecutable,
16 )
17 from pants.core.util_rules import external_tool
18 from pants.core.util_rules.external_tool import (
19 DownloadedExternalTool,
20 ExternalToolRequest,
21 TemplatedExternalTool,
22 )
23 from pants.engine.fs import CreateDigest, Digest, Directory, FileContent, MergeDigests
24 from pants.engine.internals.selectors import MultiGet
25 from pants.engine.platform import Platform
26 from pants.engine.process import Process, ProcessCacheScope
27 from pants.engine.rules import Get, collect_rules, rule
28 from pants.option.global_options import GlobalOptions
29 from pants.util.frozendict import FrozenDict
30 from pants.util.logging import LogLevel
31 from pants.util.meta import classproperty, frozen_after_init
32 from pants.util.strutil import create_path_env_var
33
34
35 class PexBinary(TemplatedExternalTool):
36 options_scope = "download-pex-bin"
37 name = "pex"
38 help = "The PEX (Python EXecutable) tool (https://github.com/pantsbuild/pex)."
39
40 default_version = "v2.1.33"
41 default_url_template = "https://github.com/pantsbuild/pex/releases/download/{version}/pex"
42
43 @classproperty
44 def default_known_versions(cls):
45 return [
46 "|".join(
47 (
48 cls.default_version,
49 plat,
50 "7f9f6168691fe83f38fbc248bd8629a152cf2a8833be1afdc06219f70fbb6064",
51 "3596348",
52 )
53 )
54 for plat in ["darwin", "linux"]
55 ]
56
57
58 @frozen_after_init
59 @dataclass(unsafe_hash=True)
60 class PexCliProcess:
61 argv: Tuple[str, ...]
62 description: str = dataclasses.field(compare=False)
63 additional_input_digest: Optional[Digest]
64 extra_env: Optional[FrozenDict[str, str]]
65 output_files: Optional[Tuple[str, ...]]
66 output_directories: Optional[Tuple[str, ...]]
67 python: Optional[PythonExecutable]
68 level: LogLevel
69 cache_scope: Optional[ProcessCacheScope]
70
71 def __init__(
72 self,
73 *,
74 argv: Iterable[str],
75 description: str,
76 additional_input_digest: Optional[Digest] = None,
77 extra_env: Optional[Mapping[str, str]] = None,
78 output_files: Optional[Iterable[str]] = None,
79 output_directories: Optional[Iterable[str]] = None,
80 python: Optional[PythonExecutable] = None,
81 level: LogLevel = LogLevel.INFO,
82 cache_scope: Optional[ProcessCacheScope] = None,
83 ) -> None:
84 self.argv = tuple(argv)
85 self.description = description
86 self.additional_input_digest = additional_input_digest
87 self.extra_env = FrozenDict(extra_env) if extra_env else None
88 self.output_files = tuple(output_files) if output_files else None
89 self.output_directories = tuple(output_directories) if output_directories else None
90 self.python = python
91 self.level = level
92 self.cache_scope = cache_scope
93 self.__post_init__()
94
95 def __post_init__(self) -> None:
96 if "--pex-root-path" in self.argv:
97 raise ValueError("`--pex-root` flag not allowed. We set its value for you.")
98
99
100 @rule
101 async def setup_pex_cli_process(
102 request: PexCliProcess,
103 pex_binary: PexBinary,
104 pex_env: PexEnvironment,
105 python_native_code: PythonNativeCode,
106 global_options: GlobalOptions,
107 pex_runtime_env: PexRuntimeEnvironment,
108 ) -> Process:
109 tmpdir = ".tmp"
110 gets: List[Get] = [
111 Get(DownloadedExternalTool, ExternalToolRequest, pex_binary.get_request(Platform.current)),
112 Get(Digest, CreateDigest([Directory(tmpdir)])),
113 ]
114 cert_args = []
115
116 # The certs file will typically not be in the repo, so we can't digest it via a PathGlobs.
117 # Instead we manually create a FileContent for it.
118 if global_options.options.ca_certs_path:
119 ca_certs_content = Path(global_options.options.ca_certs_path).read_bytes()
120 chrooted_ca_certs_path = os.path.basename(global_options.options.ca_certs_path)
121
122 gets.append(
123 Get(
124 Digest,
125 CreateDigest((FileContent(chrooted_ca_certs_path, ca_certs_content),)),
126 )
127 )
128 cert_args = ["--cert", chrooted_ca_certs_path]
129
130 downloaded_pex_bin, *digests_to_merge = await MultiGet(gets)
131 digests_to_merge.append(downloaded_pex_bin.digest)
132 if request.additional_input_digest:
133 digests_to_merge.append(request.additional_input_digest)
134 input_digest = await Get(Digest, MergeDigests(digests_to_merge))
135
136 pex_root_path = ".cache/pex_root"
137 argv = [
138 downloaded_pex_bin.exe,
139 *cert_args,
140 "--python-path",
141 create_path_env_var(pex_env.interpreter_search_paths),
142 "--pex-root",
143 pex_root_path,
144 # Ensure Pex and its subprocesses create temporary files in the the process execution
145 # sandbox. It may make sense to do this generally for Processes, but in the short term we
146 # have known use cases where /tmp is too small to hold large wheel downloads Pex is asked to
147 # perform. Making the TMPDIR local to the sandbox allows control via
148 # --local-execution-root-dir for the local case and should work well with remote cases where
149 # a remoting implementation has to allow for processes producing large binaries in a
150 # sandbox to support reasonable workloads. Communicating TMPDIR via --tmpdir instead of via
151 # environment variable allows Pex to absolutize the path ensuring subprocesses that change
152 # CWD can find the TMPDIR.
153 "--tmpdir",
154 tmpdir,
155 ]
156 if pex_runtime_env.verbosity > 0:
157 argv.append(f"-{'v' * pex_runtime_env.verbosity}")
158
159 # NB: This comes at the end of the argv because the request may use `--` passthrough args,
160 # which must come at the end.
161 argv.extend(request.argv)
162 normalized_argv = pex_env.create_argv(*argv, python=request.python)
163 env = {
164 **pex_env.environment_dict(python_configured=request.python is not None),
165 **python_native_code.environment_dict,
166 **(request.extra_env or {}),
167 }
168
169 return Process(
170 normalized_argv,
171 description=request.description,
172 input_digest=input_digest,
173 env=env,
174 output_files=request.output_files,
175 output_directories=request.output_directories,
176 append_only_caches={"pex_root": pex_root_path},
177 level=request.level,
178 cache_scope=request.cache_scope,
179 )
180
181
182 def rules():
183 return [*collect_rules(), *external_tool.rules(), *pex_environment.rules()]
184
[end of src/python/pants/backend/python/util_rules/pex_cli.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/python/pants/backend/python/util_rules/pex_cli.py b/src/python/pants/backend/python/util_rules/pex_cli.py
--- a/src/python/pants/backend/python/util_rules/pex_cli.py
+++ b/src/python/pants/backend/python/util_rules/pex_cli.py
@@ -37,7 +37,7 @@
name = "pex"
help = "The PEX (Python EXecutable) tool (https://github.com/pantsbuild/pex)."
- default_version = "v2.1.33"
+ default_version = "v2.1.34"
default_url_template = "https://github.com/pantsbuild/pex/releases/download/{version}/pex"
@classproperty
@@ -47,8 +47,8 @@
(
cls.default_version,
plat,
- "7f9f6168691fe83f38fbc248bd8629a152cf2a8833be1afdc06219f70fbb6064",
- "3596348",
+ "9b1a959ccb61b3deb64ffeed43a735c7115e414f4de6f96e66adc9e7fc7a757f",
+ "3597768",
)
)
for plat in ["darwin", "linux"]
| {"golden_diff": "diff --git a/src/python/pants/backend/python/util_rules/pex_cli.py b/src/python/pants/backend/python/util_rules/pex_cli.py\n--- a/src/python/pants/backend/python/util_rules/pex_cli.py\n+++ b/src/python/pants/backend/python/util_rules/pex_cli.py\n@@ -37,7 +37,7 @@\n name = \"pex\"\n help = \"The PEX (Python EXecutable) tool (https://github.com/pantsbuild/pex).\"\n \n- default_version = \"v2.1.33\"\n+ default_version = \"v2.1.34\"\n default_url_template = \"https://github.com/pantsbuild/pex/releases/download/{version}/pex\"\n \n @classproperty\n@@ -47,8 +47,8 @@\n (\n cls.default_version,\n plat,\n- \"7f9f6168691fe83f38fbc248bd8629a152cf2a8833be1afdc06219f70fbb6064\",\n- \"3596348\",\n+ \"9b1a959ccb61b3deb64ffeed43a735c7115e414f4de6f96e66adc9e7fc7a757f\",\n+ \"3597768\",\n )\n )\n for plat in [\"darwin\", \"linux\"]\n", "issue": "Filename too long when invoking PEX with --pex-verbosity >=3\nAny PEX-using run of Pants at 44017419d (PEX 2.1.33) with `--pex-verbosity=3` (i.e. `./pants test --pex-verbosity=3 src/python/pants/util/::`) fails reproducibly with a line like:\r\n```\r\n/Users/stuhood/.cache/pants/named_caches/pex_root/venvs/34bbe9579368d3ee50226ae26ab367a6fe827866/800ae8ef047bd783543e8282c22bfdbee7b7fca8/pex: File name too long\r\n```\r\nNotably: `--pex-verbosity=1` does not trigger the issue.\r\n\r\nCapturing the sandbox of the run using `--no-process-execution-cleanup-local-dirs` allows for reproducing the issue with the `__run.sh` script (have captured a repro). The complete log of `./__run.sh` is attached.\r\n\r\n[ftl.log](https://github.com/pantsbuild/pants/files/6127740/ftl.log)\r\n\r\n----\r\n\r\nThe content of the `pants_plugins.pex_pex_shim.sh` file (attached as [pants_plugins.pex_pex_shim.sh.txt](https://github.com/pantsbuild/pants/files/6127756/pants_plugins.pex_pex_shim.sh.txt)) seems to indicate that some of the verbose output gets accidentally embedded in the generated shim script.\n", "before_files": [{"content": "# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nimport dataclasses\nimport os\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Iterable, List, Mapping, Optional, Tuple\n\nfrom pants.backend.python.subsystems.python_native_code import PythonNativeCode\nfrom pants.backend.python.util_rules import pex_environment\nfrom pants.backend.python.util_rules.pex_environment import (\n PexEnvironment,\n PexRuntimeEnvironment,\n PythonExecutable,\n)\nfrom pants.core.util_rules import external_tool\nfrom pants.core.util_rules.external_tool import (\n DownloadedExternalTool,\n ExternalToolRequest,\n TemplatedExternalTool,\n)\nfrom pants.engine.fs import CreateDigest, Digest, Directory, FileContent, MergeDigests\nfrom pants.engine.internals.selectors import MultiGet\nfrom pants.engine.platform import Platform\nfrom pants.engine.process import Process, ProcessCacheScope\nfrom pants.engine.rules import Get, collect_rules, rule\nfrom pants.option.global_options import GlobalOptions\nfrom pants.util.frozendict import FrozenDict\nfrom pants.util.logging import LogLevel\nfrom pants.util.meta import classproperty, frozen_after_init\nfrom pants.util.strutil import create_path_env_var\n\n\nclass PexBinary(TemplatedExternalTool):\n options_scope = \"download-pex-bin\"\n name = \"pex\"\n help = \"The PEX (Python EXecutable) tool (https://github.com/pantsbuild/pex).\"\n\n default_version = \"v2.1.33\"\n default_url_template = 
\"https://github.com/pantsbuild/pex/releases/download/{version}/pex\"\n\n @classproperty\n def default_known_versions(cls):\n return [\n \"|\".join(\n (\n cls.default_version,\n plat,\n \"7f9f6168691fe83f38fbc248bd8629a152cf2a8833be1afdc06219f70fbb6064\",\n \"3596348\",\n )\n )\n for plat in [\"darwin\", \"linux\"]\n ]\n\n\n@frozen_after_init\n@dataclass(unsafe_hash=True)\nclass PexCliProcess:\n argv: Tuple[str, ...]\n description: str = dataclasses.field(compare=False)\n additional_input_digest: Optional[Digest]\n extra_env: Optional[FrozenDict[str, str]]\n output_files: Optional[Tuple[str, ...]]\n output_directories: Optional[Tuple[str, ...]]\n python: Optional[PythonExecutable]\n level: LogLevel\n cache_scope: Optional[ProcessCacheScope]\n\n def __init__(\n self,\n *,\n argv: Iterable[str],\n description: str,\n additional_input_digest: Optional[Digest] = None,\n extra_env: Optional[Mapping[str, str]] = None,\n output_files: Optional[Iterable[str]] = None,\n output_directories: Optional[Iterable[str]] = None,\n python: Optional[PythonExecutable] = None,\n level: LogLevel = LogLevel.INFO,\n cache_scope: Optional[ProcessCacheScope] = None,\n ) -> None:\n self.argv = tuple(argv)\n self.description = description\n self.additional_input_digest = additional_input_digest\n self.extra_env = FrozenDict(extra_env) if extra_env else None\n self.output_files = tuple(output_files) if output_files else None\n self.output_directories = tuple(output_directories) if output_directories else None\n self.python = python\n self.level = level\n self.cache_scope = cache_scope\n self.__post_init__()\n\n def __post_init__(self) -> None:\n if \"--pex-root-path\" in self.argv:\n raise ValueError(\"`--pex-root` flag not allowed. We set its value for you.\")\n\n\n@rule\nasync def setup_pex_cli_process(\n request: PexCliProcess,\n pex_binary: PexBinary,\n pex_env: PexEnvironment,\n python_native_code: PythonNativeCode,\n global_options: GlobalOptions,\n pex_runtime_env: PexRuntimeEnvironment,\n) -> Process:\n tmpdir = \".tmp\"\n gets: List[Get] = [\n Get(DownloadedExternalTool, ExternalToolRequest, pex_binary.get_request(Platform.current)),\n Get(Digest, CreateDigest([Directory(tmpdir)])),\n ]\n cert_args = []\n\n # The certs file will typically not be in the repo, so we can't digest it via a PathGlobs.\n # Instead we manually create a FileContent for it.\n if global_options.options.ca_certs_path:\n ca_certs_content = Path(global_options.options.ca_certs_path).read_bytes()\n chrooted_ca_certs_path = os.path.basename(global_options.options.ca_certs_path)\n\n gets.append(\n Get(\n Digest,\n CreateDigest((FileContent(chrooted_ca_certs_path, ca_certs_content),)),\n )\n )\n cert_args = [\"--cert\", chrooted_ca_certs_path]\n\n downloaded_pex_bin, *digests_to_merge = await MultiGet(gets)\n digests_to_merge.append(downloaded_pex_bin.digest)\n if request.additional_input_digest:\n digests_to_merge.append(request.additional_input_digest)\n input_digest = await Get(Digest, MergeDigests(digests_to_merge))\n\n pex_root_path = \".cache/pex_root\"\n argv = [\n downloaded_pex_bin.exe,\n *cert_args,\n \"--python-path\",\n create_path_env_var(pex_env.interpreter_search_paths),\n \"--pex-root\",\n pex_root_path,\n # Ensure Pex and its subprocesses create temporary files in the the process execution\n # sandbox. It may make sense to do this generally for Processes, but in the short term we\n # have known use cases where /tmp is too small to hold large wheel downloads Pex is asked to\n # perform. 
Making the TMPDIR local to the sandbox allows control via\n # --local-execution-root-dir for the local case and should work well with remote cases where\n # a remoting implementation has to allow for processes producing large binaries in a\n # sandbox to support reasonable workloads. Communicating TMPDIR via --tmpdir instead of via\n # environment variable allows Pex to absolutize the path ensuring subprocesses that change\n # CWD can find the TMPDIR.\n \"--tmpdir\",\n tmpdir,\n ]\n if pex_runtime_env.verbosity > 0:\n argv.append(f\"-{'v' * pex_runtime_env.verbosity}\")\n\n # NB: This comes at the end of the argv because the request may use `--` passthrough args,\n # which must come at the end.\n argv.extend(request.argv)\n normalized_argv = pex_env.create_argv(*argv, python=request.python)\n env = {\n **pex_env.environment_dict(python_configured=request.python is not None),\n **python_native_code.environment_dict,\n **(request.extra_env or {}),\n }\n\n return Process(\n normalized_argv,\n description=request.description,\n input_digest=input_digest,\n env=env,\n output_files=request.output_files,\n output_directories=request.output_directories,\n append_only_caches={\"pex_root\": pex_root_path},\n level=request.level,\n cache_scope=request.cache_scope,\n )\n\n\ndef rules():\n return [*collect_rules(), *external_tool.rules(), *pex_environment.rules()]\n", "path": "src/python/pants/backend/python/util_rules/pex_cli.py"}]} | 2,972 | 338 |
gh_patches_debug_13415 | rasdani/github-patches | git_diff | AnalogJ__lexicon-106 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
function update_record() in namesilo.py has wrong query parameter
origin: data['rdata'] = content
modified: data['rrvalue'] = content
</issue>
<code>
[start of lexicon/providers/namesilo.py]
1 from __future__ import print_function
2 from __future__ import absolute_import
3 from .base import Provider as BaseProvider
4 import requests
5 from xml.etree import ElementTree
6
7 def ProviderParser(subparser):
8 subparser.add_argument("--auth-token", help="specify key used authenticate")
9
10
11 class Provider(BaseProvider):
12
13 def __init__(self, options, provider_options={}):
14 super(Provider, self).__init__(options)
15 self.domain_id = None
16 self.api_endpoint = provider_options.get('api_endpoint') or 'https://www.namesilo.com/api'
17
18 def authenticate(self):
19
20 payload = self._get('/getDomainInfo', {'domain': self.options['domain']})
21 self.domain_id = self.options['domain']
22
23
24 # Create record. If record already exists with the same content, do nothing'
25 def create_record(self, type, name, content):
26 record = {
27 'domain': self.domain_id,
28 'rrhost': self._relative_name(name),
29 'rrtype': type,
30 'rrvalue': content
31 }
32 if self.options.get('ttl'):
33 record['rrttl'] = self.options.get('ttl')
34 payload = self._get('/dnsAddRecord', record)
35 print('create_record: {0}'.format(True))
36 return True
37
38 # List all records. Return an empty list if no records found
39 # type, name and content are used to filter records.
40 # If possible filter during the query, otherwise filter after response is received.
41 def list_records(self, type=None, name=None, content=None):
42 query = {'domain': self.domain_id}
43
44 payload = self._get('/dnsListRecords', query)
45 records = []
46 for record in payload.find('reply').findall('resource_record'):
47 processed_record = {
48 'type': record.find('type').text,
49 'name': record.find('host').text,
50 'ttl': record.find('ttl').text,
51 'content': record.find('value').text,
52 'id': record.find('record_id').text
53 }
54 records.append(processed_record)
55
56 if type:
57 records = [record for record in records if record['type'] == type]
58 if name:
59 records = [record for record in records if record['name'] == self._full_name(name)]
60 if content:
61 records = [record for record in records if record['content'] == content]
62
63 print('list_records: {0}'.format(records))
64 return records
65
66 # Create or update a record.
67 def update_record(self, identifier, type=None, name=None, content=None):
68
69 data = {
70 'domain': self.domain_id,
71 'rrid': identifier
72 }
73 # if type:
74 # data['rtype'] = type
75 if name:
76 data['rrhost'] = self._relative_name(name)
77 if content:
78 data['rdata'] = content
79 if self.options.get('ttl'):
80 data['rrttl'] = self.options.get('ttl')
81
82 payload = self._get('/dnsUpdateRecord', data)
83
84 print('update_record: {0}'.format(True))
85 return True
86
87 # Delete an existing record.
88 # If record does not exist, do nothing.
89 def delete_record(self, identifier=None, type=None, name=None, content=None):
90 data = {
91 'domain': self.domain_id
92 }
93 if not identifier:
94 records = self.list_records(type, name, content)
95 print(records)
96 if len(records) == 1:
97 data['rrid'] = records[0]['id']
98 else:
99 raise Exception('Record identifier could not be found.')
100 else:
101 data['rrid'] = identifier
102 payload = self._get('/dnsDeleteRecord', data)
103
104 print('delete_record: {0}'.format(True))
105 return True
106
107
108 # Helpers
109 def _request(self, action='GET', url='/', data=None, query_params=None):
110 if data is None:
111 data = {}
112 if query_params is None:
113 query_params = {}
114 query_params['version'] = 1
115 query_params['type'] = 'xml'
116 query_params['key'] = self.options['auth_token']
117 r = requests.request(action, self.api_endpoint + url, params=query_params)
118 #data=json.dumps(data))
119 r.raise_for_status() # if the request fails for any reason, throw an error.
120 # TODO: check if the response is an error using
121 tree = ElementTree.ElementTree(ElementTree.fromstring(r.content))
122 root = tree.getroot()
123 if root.find('reply').find('code').text != '300':
124 raise Exception('An error occurred: {0}, {1}'.format(root.find('reply').find('detail').text, root.find('reply').find('code').text))
125
126
127 return root
[end of lexicon/providers/namesilo.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lexicon/providers/namesilo.py b/lexicon/providers/namesilo.py
--- a/lexicon/providers/namesilo.py
+++ b/lexicon/providers/namesilo.py
@@ -75,7 +75,7 @@
if name:
data['rrhost'] = self._relative_name(name)
if content:
- data['rdata'] = content
+ data['rrvalue'] = content
if self.options.get('ttl'):
data['rrttl'] = self.options.get('ttl')
@@ -124,4 +124,4 @@
raise Exception('An error occurred: {0}, {1}'.format(root.find('reply').find('detail').text, root.find('reply').find('code').text))
- return root
\ No newline at end of file
+ return root
| {"golden_diff": "diff --git a/lexicon/providers/namesilo.py b/lexicon/providers/namesilo.py\n--- a/lexicon/providers/namesilo.py\n+++ b/lexicon/providers/namesilo.py\n@@ -75,7 +75,7 @@\n if name:\n data['rrhost'] = self._relative_name(name)\n if content:\n- data['rdata'] = content\n+ data['rrvalue'] = content\n if self.options.get('ttl'):\n data['rrttl'] = self.options.get('ttl')\n \n@@ -124,4 +124,4 @@\n raise Exception('An error occurred: {0}, {1}'.format(root.find('reply').find('detail').text, root.find('reply').find('code').text))\n \n \n- return root\n\\ No newline at end of file\n+ return root\n", "issue": "function update_record() in namesilo.py has wrong query parameter\norigin: data['rdata'] = content\r\nmodified: data['rrvalue'] = content\n", "before_files": [{"content": "from __future__ import print_function\nfrom __future__ import absolute_import\nfrom .base import Provider as BaseProvider\nimport requests\nfrom xml.etree import ElementTree\n\ndef ProviderParser(subparser):\n subparser.add_argument(\"--auth-token\", help=\"specify key used authenticate\")\n\n\nclass Provider(BaseProvider):\n\n def __init__(self, options, provider_options={}):\n super(Provider, self).__init__(options)\n self.domain_id = None\n self.api_endpoint = provider_options.get('api_endpoint') or 'https://www.namesilo.com/api'\n\n def authenticate(self):\n\n payload = self._get('/getDomainInfo', {'domain': self.options['domain']})\n self.domain_id = self.options['domain']\n\n\n # Create record. If record already exists with the same content, do nothing'\n def create_record(self, type, name, content):\n record = {\n 'domain': self.domain_id,\n 'rrhost': self._relative_name(name),\n 'rrtype': type,\n 'rrvalue': content\n }\n if self.options.get('ttl'):\n record['rrttl'] = self.options.get('ttl')\n payload = self._get('/dnsAddRecord', record)\n print('create_record: {0}'.format(True))\n return True\n\n # List all records. 
Return an empty list if no records found\n # type, name and content are used to filter records.\n # If possible filter during the query, otherwise filter after response is received.\n def list_records(self, type=None, name=None, content=None):\n query = {'domain': self.domain_id}\n\n payload = self._get('/dnsListRecords', query)\n records = []\n for record in payload.find('reply').findall('resource_record'):\n processed_record = {\n 'type': record.find('type').text,\n 'name': record.find('host').text,\n 'ttl': record.find('ttl').text,\n 'content': record.find('value').text,\n 'id': record.find('record_id').text\n }\n records.append(processed_record)\n\n if type:\n records = [record for record in records if record['type'] == type]\n if name:\n records = [record for record in records if record['name'] == self._full_name(name)]\n if content:\n records = [record for record in records if record['content'] == content]\n\n print('list_records: {0}'.format(records))\n return records\n\n # Create or update a record.\n def update_record(self, identifier, type=None, name=None, content=None):\n\n data = {\n 'domain': self.domain_id,\n 'rrid': identifier\n }\n # if type:\n # data['rtype'] = type\n if name:\n data['rrhost'] = self._relative_name(name)\n if content:\n data['rdata'] = content\n if self.options.get('ttl'):\n data['rrttl'] = self.options.get('ttl')\n\n payload = self._get('/dnsUpdateRecord', data)\n\n print('update_record: {0}'.format(True))\n return True\n\n # Delete an existing record.\n # If record does not exist, do nothing.\n def delete_record(self, identifier=None, type=None, name=None, content=None):\n data = {\n 'domain': self.domain_id\n }\n if not identifier:\n records = self.list_records(type, name, content)\n print(records)\n if len(records) == 1:\n data['rrid'] = records[0]['id']\n else:\n raise Exception('Record identifier could not be found.')\n else:\n data['rrid'] = identifier\n payload = self._get('/dnsDeleteRecord', data)\n\n print('delete_record: {0}'.format(True))\n return True\n\n\n # Helpers\n def _request(self, action='GET', url='/', data=None, query_params=None):\n if data is None:\n data = {}\n if query_params is None:\n query_params = {}\n query_params['version'] = 1\n query_params['type'] = 'xml'\n query_params['key'] = self.options['auth_token']\n r = requests.request(action, self.api_endpoint + url, params=query_params)\n #data=json.dumps(data))\n r.raise_for_status() # if the request fails for any reason, throw an error.\n # TODO: check if the response is an error using\n tree = ElementTree.ElementTree(ElementTree.fromstring(r.content))\n root = tree.getroot()\n if root.find('reply').find('code').text != '300':\n raise Exception('An error occurred: {0}, {1}'.format(root.find('reply').find('detail').text, root.find('reply').find('code').text))\n\n\n return root", "path": "lexicon/providers/namesilo.py"}]} | 1,896 | 189 |
gh_patches_debug_21013 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-1792 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
language settings don't persist past browser sessions
**Describe the bug**
Language settings don't persist past browser sessions. If I change my language settings, and close my browser without logging out, when I open the browser again, the language has reverted to English. However, if I logout and log back in, bookwyrm is in the language I specified.
**To Reproduce**
Steps to reproduce the behavior:
( assuming your bookwyrm is already in English)
1. Go to https://bookwyrm.social/preferences/profile
2. Select a language other than English under the Language setting and Save
3. Close the browser window and reopen bookwyrm
4. See that bookwyrm is in English and not the language you selected
**Expected behavior**
I would expect that my language settings are persistent across browser sessions.
**Instance**
bookwyrm.social
**Additional Context**
I have been able to repro this bug on Safari and Brave on macOS BigSur
</issue>
<code>
[start of bookwyrm/views/helpers.py]
1 """ helper functions used in various views """
2 import re
3 from datetime import datetime
4 import dateutil.parser
5 import dateutil.tz
6 from dateutil.parser import ParserError
7
8 from requests import HTTPError
9 from django.db.models import Q
10 from django.http import Http404
11 from django.utils import translation
12
13 from bookwyrm import activitypub, models, settings
14 from bookwyrm.connectors import ConnectorException, get_data
15 from bookwyrm.status import create_generated_note
16 from bookwyrm.utils import regex
17
18
19 # pylint: disable=unnecessary-pass
20 class WebFingerError(Exception):
21 """empty error class for problems finding user information with webfinger"""
22
23 pass
24
25
26 def get_user_from_username(viewer, username):
27 """helper function to resolve a localname or a username to a user"""
28 if viewer.is_authenticated and viewer.localname == username:
29 # that's yourself, fool
30 return viewer
31
32 # raises 404 if the user isn't found
33 try:
34 return models.User.viewer_aware_objects(viewer).get(localname=username)
35 except models.User.DoesNotExist:
36 pass
37
38 # if the localname didn't match, try the username
39 try:
40 return models.User.viewer_aware_objects(viewer).get(username=username)
41 except models.User.DoesNotExist:
42 raise Http404()
43
44
45 def is_api_request(request):
46 """check whether a request is asking for html or data"""
47 return "json" in request.headers.get("Accept", "") or re.match(
48 r".*\.json/?$", request.path
49 )
50
51
52 def is_bookwyrm_request(request):
53 """check if the request is coming from another bookwyrm instance"""
54 user_agent = request.headers.get("User-Agent")
55 if user_agent is None or re.search(regex.BOOKWYRM_USER_AGENT, user_agent) is None:
56 return False
57 return True
58
59
60 def handle_remote_webfinger(query):
61 """webfingerin' other servers"""
62 user = None
63
64 # usernames could be @user@domain or user@domain
65 if not query:
66 return None
67 if query[0] == "@":
68 query = query[1:]
69 try:
70 domain = query.split("@")[1]
71 except IndexError:
72 return None
73
74 try:
75 user = models.User.objects.get(username__iexact=query)
76 except models.User.DoesNotExist:
77 url = f"https://{domain}/.well-known/webfinger?resource=acct:{query}"
78 try:
79 data = get_data(url)
80 except (ConnectorException, HTTPError):
81 return None
82
83 for link in data.get("links"):
84 if link.get("rel") == "self":
85 try:
86 user = activitypub.resolve_remote_id(
87 link["href"], model=models.User
88 )
89 except (KeyError, activitypub.ActivitySerializerError):
90 return None
91 return user
92
93
94 def subscribe_remote_webfinger(query):
95 """get subscribe template from other servers"""
96 template = None
97 # usernames could be @user@domain or user@domain
98 if not query:
99 return WebFingerError("invalid_username")
100
101 if query[0] == "@":
102 query = query[1:]
103
104 try:
105 domain = query.split("@")[1]
106 except IndexError:
107 return WebFingerError("invalid_username")
108
109 url = f"https://{domain}/.well-known/webfinger?resource=acct:{query}"
110
111 try:
112 data = get_data(url)
113 except (ConnectorException, HTTPError):
114 return WebFingerError("user_not_found")
115
116 for link in data.get("links"):
117 if link.get("rel") == "http://ostatus.org/schema/1.0/subscribe":
118 template = link["template"]
119
120 return template
121
122
123 def get_edition(book_id):
124 """look up a book in the db and return an edition"""
125 book = models.Book.objects.select_subclasses().get(id=book_id)
126 if isinstance(book, models.Work):
127 book = book.default_edition
128 return book
129
130
131 def handle_reading_status(user, shelf, book, privacy):
132 """post about a user reading a book"""
133 # tell the world about this cool thing that happened
134 try:
135 message = {
136 "to-read": "wants to read",
137 "reading": "started reading",
138 "read": "finished reading",
139 }[shelf.identifier]
140 except KeyError:
141 # it's a non-standard shelf, don't worry about it
142 return
143
144 status = create_generated_note(user, message, mention_books=[book], privacy=privacy)
145 status.save()
146
147
148 def is_blocked(viewer, user):
149 """is this viewer blocked by the user?"""
150 if viewer.is_authenticated and viewer in user.blocks.all():
151 return True
152 return False
153
154
155 def get_landing_books():
156 """list of books for the landing page"""
157
158 return list(
159 set(
160 models.Edition.objects.filter(
161 review__published_date__isnull=False,
162 review__deleted=False,
163 review__user__local=True,
164 review__privacy__in=["public", "unlisted"],
165 )
166 .exclude(cover__exact="")
167 .distinct()
168 .order_by("-review__published_date")[:6]
169 )
170 )
171
172
173 def load_date_in_user_tz_as_utc(date_str: str, user: models.User) -> datetime:
174 """ensures that data is stored consistently in the UTC timezone"""
175 if not date_str:
176 return None
177 user_tz = dateutil.tz.gettz(user.preferred_timezone)
178 date = dateutil.parser.parse(date_str, ignoretz=True)
179 try:
180 return date.replace(tzinfo=user_tz).astimezone(dateutil.tz.UTC)
181 except ParserError:
182 return None
183
184
185 def set_language(user, response):
186 """Updates a user's language"""
187 if user.preferred_language:
188 translation.activate(user.preferred_language)
189 response.set_cookie(settings.LANGUAGE_COOKIE_NAME, user.preferred_language)
190 return response
191
192
193 def filter_stream_by_status_type(activities, allowed_types=None):
194 """filter out activities based on types"""
195 if not allowed_types:
196 allowed_types = []
197
198 if "review" not in allowed_types:
199 activities = activities.filter(
200 Q(review__isnull=True), Q(boost__boosted_status__review__isnull=True)
201 )
202 if "comment" not in allowed_types:
203 activities = activities.filter(
204 Q(comment__isnull=True), Q(boost__boosted_status__comment__isnull=True)
205 )
206 if "quotation" not in allowed_types:
207 activities = activities.filter(
208 Q(quotation__isnull=True), Q(boost__boosted_status__quotation__isnull=True)
209 )
210 if "everything" not in allowed_types:
211 activities = activities.filter(
212 Q(generatednote__isnull=True),
213 Q(boost__boosted_status__generatednote__isnull=True),
214 )
215
216 return activities
217
[end of bookwyrm/views/helpers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bookwyrm/views/helpers.py b/bookwyrm/views/helpers.py
--- a/bookwyrm/views/helpers.py
+++ b/bookwyrm/views/helpers.py
@@ -1,12 +1,13 @@
""" helper functions used in various views """
import re
-from datetime import datetime
+from datetime import datetime, timedelta
import dateutil.parser
import dateutil.tz
from dateutil.parser import ParserError
from requests import HTTPError
from django.db.models import Q
+from django.conf import settings as django_settings
from django.http import Http404
from django.utils import translation
@@ -186,7 +187,11 @@
"""Updates a user's language"""
if user.preferred_language:
translation.activate(user.preferred_language)
- response.set_cookie(settings.LANGUAGE_COOKIE_NAME, user.preferred_language)
+ response.set_cookie(
+ settings.LANGUAGE_COOKIE_NAME,
+ user.preferred_language,
+ expires=datetime.now() + timedelta(seconds=django_settings.SESSION_COOKIE_AGE),
+ )
return response
| {"golden_diff": "diff --git a/bookwyrm/views/helpers.py b/bookwyrm/views/helpers.py\n--- a/bookwyrm/views/helpers.py\n+++ b/bookwyrm/views/helpers.py\n@@ -1,12 +1,13 @@\n \"\"\" helper functions used in various views \"\"\"\n import re\n-from datetime import datetime\n+from datetime import datetime, timedelta\n import dateutil.parser\n import dateutil.tz\n from dateutil.parser import ParserError\n \n from requests import HTTPError\n from django.db.models import Q\n+from django.conf import settings as django_settings\n from django.http import Http404\n from django.utils import translation\n \n@@ -186,7 +187,11 @@\n \"\"\"Updates a user's language\"\"\"\n if user.preferred_language:\n translation.activate(user.preferred_language)\n- response.set_cookie(settings.LANGUAGE_COOKIE_NAME, user.preferred_language)\n+ response.set_cookie(\n+ settings.LANGUAGE_COOKIE_NAME,\n+ user.preferred_language,\n+ expires=datetime.now() + timedelta(seconds=django_settings.SESSION_COOKIE_AGE),\n+ )\n return response\n", "issue": "language settings don't persist past browser sessions\n**Describe the bug**\r\nLanguage settings don't persist past browser sessions. If I change my language settings, and close my browser without logging out, when I open the browser again, the language has reverted to English. However, if I logout and log back in, bookwyrm is in the language I specified. \r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n( assuming your bookwyrm is already in English)\r\n1. Go to https://bookwyrm.social/preferences/profile\r\n2. Select a language other than English under the Language setting and Save\r\n3. Close the browser window and reopen bookwyrm\r\n4. See that bookwyrm is in English and not the language you selected\r\n\r\n**Expected behavior**\r\nI would expect that my language settings are persistent across browser sessions. 
\r\n\r\n**Instance**\r\nbookwyrm.social\r\n\r\n**Additional Context**\r\nI have been able to repro this bug on Safari and Brave on macOS BigSur\n", "before_files": [{"content": "\"\"\" helper functions used in various views \"\"\"\nimport re\nfrom datetime import datetime\nimport dateutil.parser\nimport dateutil.tz\nfrom dateutil.parser import ParserError\n\nfrom requests import HTTPError\nfrom django.db.models import Q\nfrom django.http import Http404\nfrom django.utils import translation\n\nfrom bookwyrm import activitypub, models, settings\nfrom bookwyrm.connectors import ConnectorException, get_data\nfrom bookwyrm.status import create_generated_note\nfrom bookwyrm.utils import regex\n\n\n# pylint: disable=unnecessary-pass\nclass WebFingerError(Exception):\n \"\"\"empty error class for problems finding user information with webfinger\"\"\"\n\n pass\n\n\ndef get_user_from_username(viewer, username):\n \"\"\"helper function to resolve a localname or a username to a user\"\"\"\n if viewer.is_authenticated and viewer.localname == username:\n # that's yourself, fool\n return viewer\n\n # raises 404 if the user isn't found\n try:\n return models.User.viewer_aware_objects(viewer).get(localname=username)\n except models.User.DoesNotExist:\n pass\n\n # if the localname didn't match, try the username\n try:\n return models.User.viewer_aware_objects(viewer).get(username=username)\n except models.User.DoesNotExist:\n raise Http404()\n\n\ndef is_api_request(request):\n \"\"\"check whether a request is asking for html or data\"\"\"\n return \"json\" in request.headers.get(\"Accept\", \"\") or re.match(\n r\".*\\.json/?$\", request.path\n )\n\n\ndef is_bookwyrm_request(request):\n \"\"\"check if the request is coming from another bookwyrm instance\"\"\"\n user_agent = request.headers.get(\"User-Agent\")\n if user_agent is None or re.search(regex.BOOKWYRM_USER_AGENT, user_agent) is None:\n return False\n return True\n\n\ndef handle_remote_webfinger(query):\n \"\"\"webfingerin' other servers\"\"\"\n user = None\n\n # usernames could be @user@domain or user@domain\n if not query:\n return None\n if query[0] == \"@\":\n query = query[1:]\n try:\n domain = query.split(\"@\")[1]\n except IndexError:\n return None\n\n try:\n user = models.User.objects.get(username__iexact=query)\n except models.User.DoesNotExist:\n url = f\"https://{domain}/.well-known/webfinger?resource=acct:{query}\"\n try:\n data = get_data(url)\n except (ConnectorException, HTTPError):\n return None\n\n for link in data.get(\"links\"):\n if link.get(\"rel\") == \"self\":\n try:\n user = activitypub.resolve_remote_id(\n link[\"href\"], model=models.User\n )\n except (KeyError, activitypub.ActivitySerializerError):\n return None\n return user\n\n\ndef subscribe_remote_webfinger(query):\n \"\"\"get subscribe template from other servers\"\"\"\n template = None\n # usernames could be @user@domain or user@domain\n if not query:\n return WebFingerError(\"invalid_username\")\n\n if query[0] == \"@\":\n query = query[1:]\n\n try:\n domain = query.split(\"@\")[1]\n except IndexError:\n return WebFingerError(\"invalid_username\")\n\n url = f\"https://{domain}/.well-known/webfinger?resource=acct:{query}\"\n\n try:\n data = get_data(url)\n except (ConnectorException, HTTPError):\n return WebFingerError(\"user_not_found\")\n\n for link in data.get(\"links\"):\n if link.get(\"rel\") == \"http://ostatus.org/schema/1.0/subscribe\":\n template = link[\"template\"]\n\n return template\n\n\ndef get_edition(book_id):\n \"\"\"look up a book in the db and 
return an edition\"\"\"\n book = models.Book.objects.select_subclasses().get(id=book_id)\n if isinstance(book, models.Work):\n book = book.default_edition\n return book\n\n\ndef handle_reading_status(user, shelf, book, privacy):\n \"\"\"post about a user reading a book\"\"\"\n # tell the world about this cool thing that happened\n try:\n message = {\n \"to-read\": \"wants to read\",\n \"reading\": \"started reading\",\n \"read\": \"finished reading\",\n }[shelf.identifier]\n except KeyError:\n # it's a non-standard shelf, don't worry about it\n return\n\n status = create_generated_note(user, message, mention_books=[book], privacy=privacy)\n status.save()\n\n\ndef is_blocked(viewer, user):\n \"\"\"is this viewer blocked by the user?\"\"\"\n if viewer.is_authenticated and viewer in user.blocks.all():\n return True\n return False\n\n\ndef get_landing_books():\n \"\"\"list of books for the landing page\"\"\"\n\n return list(\n set(\n models.Edition.objects.filter(\n review__published_date__isnull=False,\n review__deleted=False,\n review__user__local=True,\n review__privacy__in=[\"public\", \"unlisted\"],\n )\n .exclude(cover__exact=\"\")\n .distinct()\n .order_by(\"-review__published_date\")[:6]\n )\n )\n\n\ndef load_date_in_user_tz_as_utc(date_str: str, user: models.User) -> datetime:\n \"\"\"ensures that data is stored consistently in the UTC timezone\"\"\"\n if not date_str:\n return None\n user_tz = dateutil.tz.gettz(user.preferred_timezone)\n date = dateutil.parser.parse(date_str, ignoretz=True)\n try:\n return date.replace(tzinfo=user_tz).astimezone(dateutil.tz.UTC)\n except ParserError:\n return None\n\n\ndef set_language(user, response):\n \"\"\"Updates a user's language\"\"\"\n if user.preferred_language:\n translation.activate(user.preferred_language)\n response.set_cookie(settings.LANGUAGE_COOKIE_NAME, user.preferred_language)\n return response\n\n\ndef filter_stream_by_status_type(activities, allowed_types=None):\n \"\"\"filter out activities based on types\"\"\"\n if not allowed_types:\n allowed_types = []\n\n if \"review\" not in allowed_types:\n activities = activities.filter(\n Q(review__isnull=True), Q(boost__boosted_status__review__isnull=True)\n )\n if \"comment\" not in allowed_types:\n activities = activities.filter(\n Q(comment__isnull=True), Q(boost__boosted_status__comment__isnull=True)\n )\n if \"quotation\" not in allowed_types:\n activities = activities.filter(\n Q(quotation__isnull=True), Q(boost__boosted_status__quotation__isnull=True)\n )\n if \"everything\" not in allowed_types:\n activities = activities.filter(\n Q(generatednote__isnull=True),\n Q(boost__boosted_status__generatednote__isnull=True),\n )\n\n return activities\n", "path": "bookwyrm/views/helpers.py"}]} | 2,776 | 229 |
gh_patches_debug_16041 | rasdani/github-patches | git_diff | pypa__setuptools-3709 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] AttributeError: module 'distutils.log' has no attribute 'warning'
### setuptools version
setuptools==65.6.2
### Python version
Python 3.8.15
### OS
macOS 12.5.1
### Additional environment information
_No response_
### Description
Installing a package `kappa=0.6.0` doesn't work. I think it could be the issue on setuptools's side.
### Expected behavior
Installing the package works.
### How to Reproduce
```console
$ python3.8 -m venv .venv
$ source .venv/bin/activate
$ pip install setuptools==65.6.2
$ pip install wheel
$ pip install kappa==0.6.0
```
### Output
```console
(.venv) dmytro@Dmytros-MacBook-Pro install_kappa % pip install kappa==0.6.0
Collecting kappa==0.6.0
Using cached kappa-0.6.0.tar.gz (29 kB)
Preparing metadata (setup.py) ... error
error: subprocess-exited-with-error
× python setup.py egg_info did not run successfully.
│ exit code: 1
╰─> [20 lines of output]
Traceback (most recent call last):
File "<string>", line 2, in <module>
File "<pip-setuptools-caller>", line 34, in <module>
File "/private/var/folders/kv/zxwjm57d3jdgwyylrg2db6r80000gn/T/pip-install-xjv7l4bl/kappa_6f4dfc99aa59445e99fd8697c5e00cd7/setup.py", line 54, in <module>
run_setup()
File "/private/var/folders/kv/zxwjm57d3jdgwyylrg2db6r80000gn/T/pip-install-xjv7l4bl/kappa_6f4dfc99aa59445e99fd8697c5e00cd7/setup.py", line 18, in run_setup
setup(
File "/Users/dmytro/Tests/install_kappa/.venv/lib/python3.8/site-packages/setuptools/__init__.py", line 87, in setup
return distutils.core.setup(**attrs)
File "/Users/dmytro/Tests/install_kappa/.venv/lib/python3.8/site-packages/setuptools/_distutils/core.py", line 147, in setup
_setup_distribution = dist = klass(attrs)
File "/Users/dmytro/Tests/install_kappa/.venv/lib/python3.8/site-packages/setuptools/dist.py", line 475, in __init__
_Distribution.__init__(
File "/Users/dmytro/Tests/install_kappa/.venv/lib/python3.8/site-packages/setuptools/_distutils/dist.py", line 258, in __init__
getattr(self.metadata, "set_" + key)(val)
File "/Users/dmytro/Tests/install_kappa/.venv/lib/python3.8/site-packages/setuptools/_distutils/dist.py", line 1242, in set_classifiers
self.classifiers = _ensure_list(value, 'classifiers')
File "/Users/dmytro/Tests/install_kappa/.venv/lib/python3.8/site-packages/setuptools/_distutils/dist.py", line 48, in _ensure_list
log.warning(msg)
AttributeError: module 'distutils.log' has no attribute 'warning'
[end of output]
note: This error originates from a subprocess, and is likely not a problem with pip.
error: metadata-generation-failed
× Encountered error while generating package metadata.
╰─> See above for output.
note: This is an issue with the package mentioned above, not pip.
hint: See above for details.
```
</issue>
<code>
[start of setuptools/logging.py]
1 import sys
2 import logging
3 import distutils.log
4 from . import monkey
5
6
7 def _not_warning(record):
8 return record.levelno < logging.WARNING
9
10
11 def configure():
12 """
13 Configure logging to emit warning and above to stderr
14 and everything else to stdout. This behavior is provided
15 for compatibility with distutils.log but may change in
16 the future.
17 """
18 err_handler = logging.StreamHandler()
19 err_handler.setLevel(logging.WARNING)
20 out_handler = logging.StreamHandler(sys.stdout)
21 out_handler.addFilter(_not_warning)
22 handlers = err_handler, out_handler
23 logging.basicConfig(
24 format="{message}", style='{', handlers=handlers, level=logging.DEBUG)
25 if hasattr(distutils.log, 'Log'):
26 monkey.patch_func(set_threshold, distutils.log, 'set_threshold')
27 # For some reason `distutils.log` module is getting cached in `distutils.dist`
28 # and then loaded again when patched,
29 # implying: id(distutils.log) != id(distutils.dist.log).
30 # Make sure the same module object is used everywhere:
31 distutils.dist.log = distutils.log
32
33
34 def set_threshold(level):
35 logging.root.setLevel(level*10)
36 return set_threshold.unpatched(level)
37
[end of setuptools/logging.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setuptools/logging.py b/setuptools/logging.py
--- a/setuptools/logging.py
+++ b/setuptools/logging.py
@@ -1,4 +1,5 @@
import sys
+import inspect
import logging
import distutils.log
from . import monkey
@@ -22,7 +23,7 @@
handlers = err_handler, out_handler
logging.basicConfig(
format="{message}", style='{', handlers=handlers, level=logging.DEBUG)
- if hasattr(distutils.log, 'Log'):
+ if inspect.ismodule(distutils.dist.log):
monkey.patch_func(set_threshold, distutils.log, 'set_threshold')
# For some reason `distutils.log` module is getting cached in `distutils.dist`
# and then loaded again when patched,
| {"golden_diff": "diff --git a/setuptools/logging.py b/setuptools/logging.py\n--- a/setuptools/logging.py\n+++ b/setuptools/logging.py\n@@ -1,4 +1,5 @@\n import sys\n+import inspect\n import logging\n import distutils.log\n from . import monkey\n@@ -22,7 +23,7 @@\n handlers = err_handler, out_handler\n logging.basicConfig(\n format=\"{message}\", style='{', handlers=handlers, level=logging.DEBUG)\n- if hasattr(distutils.log, 'Log'):\n+ if inspect.ismodule(distutils.dist.log):\n monkey.patch_func(set_threshold, distutils.log, 'set_threshold')\n # For some reason `distutils.log` module is getting cached in `distutils.dist`\n # and then loaded again when patched,\n", "issue": "[BUG] AttributeError: module 'distutils.log' has no attribute 'warning'\n### setuptools version\n\nsetuptools==65.6.2\n\n### Python version\n\nPython 3.8.15\n\n### OS\n\nmacOS 12.5.1\n\n### Additional environment information\n\n_No response_\n\n### Description\n\nInstalling a package `kappa=0.6.0` doesn't work. I think it could be the issue on setuptools's side.\n\n### Expected behavior\n\nInstalling the package works.\n\n### How to Reproduce\n\n```console\r\n$ python3.8 -m venv .venv\r\n$ source .venv/bin/activate\r\n$ pip install setuptools==65.6.2\r\n$ pip install wheel\r\n$ pip install kappa==0.6.0\r\n```\n\n### Output\n\n```console\r\n(.venv) dmytro@Dmytros-MacBook-Pro install_kappa % pip install kappa==0.6.0\r\nCollecting kappa==0.6.0\r\n Using cached kappa-0.6.0.tar.gz (29 kB)\r\n Preparing metadata (setup.py) ... error\r\n error: subprocess-exited-with-error\r\n\r\n \u00d7 python setup.py egg_info did not run successfully.\r\n \u2502 exit code: 1\r\n \u2570\u2500> [20 lines of output]\r\n Traceback (most recent call last):\r\n File \"<string>\", line 2, in <module>\r\n File \"<pip-setuptools-caller>\", line 34, in <module>\r\n File \"/private/var/folders/kv/zxwjm57d3jdgwyylrg2db6r80000gn/T/pip-install-xjv7l4bl/kappa_6f4dfc99aa59445e99fd8697c5e00cd7/setup.py\", line 54, in <module>\r\n run_setup()\r\n File \"/private/var/folders/kv/zxwjm57d3jdgwyylrg2db6r80000gn/T/pip-install-xjv7l4bl/kappa_6f4dfc99aa59445e99fd8697c5e00cd7/setup.py\", line 18, in run_setup\r\n setup(\r\n File \"/Users/dmytro/Tests/install_kappa/.venv/lib/python3.8/site-packages/setuptools/__init__.py\", line 87, in setup\r\n return distutils.core.setup(**attrs)\r\n File \"/Users/dmytro/Tests/install_kappa/.venv/lib/python3.8/site-packages/setuptools/_distutils/core.py\", line 147, in setup\r\n _setup_distribution = dist = klass(attrs)\r\n File \"/Users/dmytro/Tests/install_kappa/.venv/lib/python3.8/site-packages/setuptools/dist.py\", line 475, in __init__\r\n _Distribution.__init__(\r\n File \"/Users/dmytro/Tests/install_kappa/.venv/lib/python3.8/site-packages/setuptools/_distutils/dist.py\", line 258, in __init__\r\n getattr(self.metadata, \"set_\" + key)(val)\r\n File \"/Users/dmytro/Tests/install_kappa/.venv/lib/python3.8/site-packages/setuptools/_distutils/dist.py\", line 1242, in set_classifiers\r\n self.classifiers = _ensure_list(value, 'classifiers')\r\n File \"/Users/dmytro/Tests/install_kappa/.venv/lib/python3.8/site-packages/setuptools/_distutils/dist.py\", line 48, in _ensure_list\r\n log.warning(msg)\r\n AttributeError: module 'distutils.log' has no attribute 'warning'\r\n [end of output]\r\n\r\n note: This error originates from a subprocess, and is likely not a problem with pip.\r\nerror: metadata-generation-failed\r\n\r\n\u00d7 Encountered error while generating package metadata.\r\n\u2570\u2500> See above for output.\r\n\r\nnote: 
This is an issue with the package mentioned above, not pip.\r\nhint: See above for details.\r\n```\r\n\n", "before_files": [{"content": "import sys\nimport logging\nimport distutils.log\nfrom . import monkey\n\n\ndef _not_warning(record):\n return record.levelno < logging.WARNING\n\n\ndef configure():\n \"\"\"\n Configure logging to emit warning and above to stderr\n and everything else to stdout. This behavior is provided\n for compatibility with distutils.log but may change in\n the future.\n \"\"\"\n err_handler = logging.StreamHandler()\n err_handler.setLevel(logging.WARNING)\n out_handler = logging.StreamHandler(sys.stdout)\n out_handler.addFilter(_not_warning)\n handlers = err_handler, out_handler\n logging.basicConfig(\n format=\"{message}\", style='{', handlers=handlers, level=logging.DEBUG)\n if hasattr(distutils.log, 'Log'):\n monkey.patch_func(set_threshold, distutils.log, 'set_threshold')\n # For some reason `distutils.log` module is getting cached in `distutils.dist`\n # and then loaded again when patched,\n # implying: id(distutils.log) != id(distutils.dist.log).\n # Make sure the same module object is used everywhere:\n distutils.dist.log = distutils.log\n\n\ndef set_threshold(level):\n logging.root.setLevel(level*10)\n return set_threshold.unpatched(level)\n", "path": "setuptools/logging.py"}]} | 1,741 | 165 |
gh_patches_debug_6045 | rasdani/github-patches | git_diff | ManimCommunity__manim-509 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ValueTracker must support increment also using +=
Just what the title says. Here's an example
```python
class Test(Scene):
def construct(self):
tracker = ValueTracker(0.0)
tracker.set_value(2.0)
print(tracker.get_value()) # -> 2.000
tracker.increment_value(3.0)
print(tracker.get_value()) # -> 5.000
tracker += 4.0 # NEW proposed feature
print(tracker.get_value()) # -> 9.000
```
</issue>
<code>
[start of manim/mobject/value_tracker.py]
1 """Mobjects that dynamically show the change of a variable."""
2
3 __all__ = ["ValueTracker", "ExponentialValueTracker", "ComplexValueTracker"]
4
5
6 import numpy as np
7
8 from ..utils.paths import straight_path
9 from ..mobject.mobject import Mobject
10
11
12 class ValueTracker(Mobject):
13 """A mobject that can be used for tracking (real-valued) parameters.
14 Useful for animating parameter changes.
15
16 Not meant to be displayed. Instead the position encodes some
17 number, often one which another animation or continual_animation
18 uses for its update function, and by treating it as a mobject it can
19 still be animated and manipulated just like anything else.
20
21 Examples
22 --------
23 .. manim:: ValueTrackerExample
24
25 class ValueTrackerExample(Scene):
26 def construct(self):
27 number_line = NumberLine()
28 pointer = Vector(DOWN)
29 label = MathTex("x").add_updater(lambda m: m.next_to(pointer, UP))
30
31 pointer_value = ValueTracker(0)
32 pointer.add_updater(
33 lambda m: m.next_to(
34 number_line.n2p(pointer_value.get_value()),
35 UP
36 )
37 )
38 self.add(number_line, pointer,label)
39 self.play(pointer_value.set_value, 5)
40 self.wait()
41 self.play(pointer_value.set_value, 3)
42
43 """
44
45 def __init__(self, value=0, **kwargs):
46 Mobject.__init__(self, **kwargs)
47 self.points = np.zeros((1, 3))
48 self.set_value(value)
49
50 def get_value(self):
51 return self.points[0, 0]
52
53 def set_value(self, value):
54 self.points[0, 0] = value
55 return self
56
57 def increment_value(self, d_value):
58 self.set_value(self.get_value() + d_value)
59
60 def interpolate(self, mobject1, mobject2, alpha, path_func=straight_path):
61 """
62 Turns self into an interpolation between mobject1
63 and mobject2.
64 """
65 self.points = path_func(mobject1.points, mobject2.points, alpha)
66 return self
67
68
69 class ExponentialValueTracker(ValueTracker):
70 """
71 Operates just like ValueTracker, except it encodes the value as the
72 exponential of a position coordinate, which changes how interpolation
73 behaves
74 """
75
76 def get_value(self):
77 return np.exp(ValueTracker.get_value(self))
78
79 def set_value(self, value):
80 return ValueTracker.set_value(self, np.log(value))
81
82
83 class ComplexValueTracker(ValueTracker):
84 def get_value(self):
85 return complex(*self.points[0, :2])
86
87 def set_value(self, z):
88 z = complex(z)
89 self.points[0, :2] = (z.real, z.imag)
90 return self
91
[end of manim/mobject/value_tracker.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/manim/mobject/value_tracker.py b/manim/mobject/value_tracker.py
--- a/manim/mobject/value_tracker.py
+++ b/manim/mobject/value_tracker.py
@@ -57,6 +57,10 @@
def increment_value(self, d_value):
self.set_value(self.get_value() + d_value)
+ def __iadd__(self, d_value):
+ self.increment_value(d_value)
+ return self
+
def interpolate(self, mobject1, mobject2, alpha, path_func=straight_path):
"""
Turns self into an interpolation between mobject1
| {"golden_diff": "diff --git a/manim/mobject/value_tracker.py b/manim/mobject/value_tracker.py\n--- a/manim/mobject/value_tracker.py\n+++ b/manim/mobject/value_tracker.py\n@@ -57,6 +57,10 @@\n def increment_value(self, d_value):\n self.set_value(self.get_value() + d_value)\n \n+ def __iadd__(self, d_value):\n+ self.increment_value(d_value)\n+ return self\n+\n def interpolate(self, mobject1, mobject2, alpha, path_func=straight_path):\n \"\"\"\n Turns self into an interpolation between mobject1\n", "issue": "ValueTracker must support increment also using +=\nJust what the title says. Here's an example \r\n```python\r\nclass Test(Scene):\r\n def construct(self):\r\n tracker = ValueTracker(0.0)\r\n tracker.set_value(2.0)\r\n print(tracker.get_value()) # -> 2.000\r\n tracker.increment_value(3.0)\r\n print(tracker.get_value()) # -> 5.000\r\n tracker += 4.0 # NEW proposed feature\r\n print(tracker.get_value()) # -> 9.000\r\n```\r\n\n", "before_files": [{"content": "\"\"\"Mobjects that dynamically show the change of a variable.\"\"\"\n\n__all__ = [\"ValueTracker\", \"ExponentialValueTracker\", \"ComplexValueTracker\"]\n\n\nimport numpy as np\n\nfrom ..utils.paths import straight_path\nfrom ..mobject.mobject import Mobject\n\n\nclass ValueTracker(Mobject):\n \"\"\"A mobject that can be used for tracking (real-valued) parameters.\n Useful for animating parameter changes.\n\n Not meant to be displayed. Instead the position encodes some\n number, often one which another animation or continual_animation\n uses for its update function, and by treating it as a mobject it can\n still be animated and manipulated just like anything else.\n\n Examples\n --------\n .. manim:: ValueTrackerExample\n\n class ValueTrackerExample(Scene):\n def construct(self):\n number_line = NumberLine()\n pointer = Vector(DOWN)\n label = MathTex(\"x\").add_updater(lambda m: m.next_to(pointer, UP))\n\n pointer_value = ValueTracker(0)\n pointer.add_updater(\n lambda m: m.next_to(\n number_line.n2p(pointer_value.get_value()),\n UP\n )\n )\n self.add(number_line, pointer,label)\n self.play(pointer_value.set_value, 5)\n self.wait()\n self.play(pointer_value.set_value, 3)\n\n \"\"\"\n\n def __init__(self, value=0, **kwargs):\n Mobject.__init__(self, **kwargs)\n self.points = np.zeros((1, 3))\n self.set_value(value)\n\n def get_value(self):\n return self.points[0, 0]\n\n def set_value(self, value):\n self.points[0, 0] = value\n return self\n\n def increment_value(self, d_value):\n self.set_value(self.get_value() + d_value)\n\n def interpolate(self, mobject1, mobject2, alpha, path_func=straight_path):\n \"\"\"\n Turns self into an interpolation between mobject1\n and mobject2.\n \"\"\"\n self.points = path_func(mobject1.points, mobject2.points, alpha)\n return self\n\n\nclass ExponentialValueTracker(ValueTracker):\n \"\"\"\n Operates just like ValueTracker, except it encodes the value as the\n exponential of a position coordinate, which changes how interpolation\n behaves\n \"\"\"\n\n def get_value(self):\n return np.exp(ValueTracker.get_value(self))\n\n def set_value(self, value):\n return ValueTracker.set_value(self, np.log(value))\n\n\nclass ComplexValueTracker(ValueTracker):\n def get_value(self):\n return complex(*self.points[0, :2])\n\n def set_value(self, z):\n z = complex(z)\n self.points[0, :2] = (z.real, z.imag)\n return self\n", "path": "manim/mobject/value_tracker.py"}]} | 1,446 | 135 |
gh_patches_debug_21579 | rasdani/github-patches | git_diff | opendatacube__datacube-core-1103 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Searching for UUIDs from the command line
### Expected behaviour
I would like to be able to search for UUIDs from the command line, e.g.:
datacube dataset search id = 26931d17-7a4e-4b55-98e7-d6777fb61df6
### Actual behaviour
If I try to run this command, `datacube` crashes with the following Exception:
```
File "/g/data/v10/public/modules/dea/20200828/lib/python3.6/site-packages/datacube/ui/click.py", line 356, in my_parse
return parse_expressions(*list(value))
File "/g/data/v10/public/modules/dea/20200828/lib/python3.6/site-packages/datacube/ui/expression.py", line 112, in parse_expressions
tree = expr_parser.parse(' '.join(expression_text))
File "/g/data/v10/public/modules/dea-env/20200713/lib/python3.6/site-packages/lark/lark.py", line 391, in parse
return self.parser.parse(text, start=start)
File "/g/data/v10/public/modules/dea-env/20200713/lib/python3.6/site-packages/lark/parser_frontends.py", line 199, in parse
return self._parse(text, start)
File "/g/data/v10/public/modules/dea-env/20200713/lib/python3.6/site-packages/lark/parser_frontends.py", line 53, in _parse
return self.parser.parse(input, start, *args)
File "/g/data/v10/public/modules/dea-env/20200713/lib/python3.6/site-packages/lark/parsers/earley.py", line 293, in parse
to_scan = self._parse(stream, columns, to_scan, start_symbol)
File "/g/data/v10/public/modules/dea-env/20200713/lib/python3.6/site-packages/lark/parsers/xearley.py", line 138, in _parse
to_scan = scan(i, to_scan)
File "/g/data/v10/public/modules/dea-env/20200713/lib/python3.6/site-packages/lark/parsers/xearley.py", line 115, in scan
raise UnexpectedCharacters(stream, i, text_line, text_column, {item.expect.name for item in to_scan}, set(to_scan))
lark.exceptions.UnexpectedCharacters: No terminal defined for '-' at line 1 col 14
id = 26931d17-7a4e-4b55-98e7-d6777fb61df6
^
Expecting: {'EQUAL', 'IN'}
```
### Cause
This happens because the search expression parser is treating the UUID as a number instead of a string. It's possible to work around this by surrounding the UUID in quotes. e.g.:
datacube dataset search id = \"26931d17-7a4e-4b55-98e7-d6777fb61df6\"
### Intended changes
It should be possible to improve the parser to recognise UUIDs, to avoid having to manually quote them from the shell. The search expression 'language' was intended to be shell escaping friendly, so this seems like an obvious improvement.
### Environment information
* Which ``datacube --version`` are you using?
`Open Data Cube core, version 1.8.3`
</issue>
<code>
[start of datacube/ui/expression.py]
1 # This file is part of the Open Data Cube, see https://opendatacube.org for more information
2 #
3 # Copyright (c) 2015-2020 ODC Contributors
4 # SPDX-License-Identifier: Apache-2.0
5 """
6 Search expression parsing for command line applications.
7
8 Three types of expressions are available:
9
10 FIELD = VALUE
11 FIELD in DATE-RANGE
12 FIELD in [START, END]
13
14 Where DATE-RANGE is one of YYYY, YYYY-MM or YYYY-MM-DD
15 and START, END are either numbers or dates.
16 """
17 # flake8: noqa
18
19 from lark import Lark, v_args, Transformer
20
21 from datacube.api.query import _time_to_search_dims
22 from datacube.model import Range
23
24
25 search_grammar = r"""
26 start: expression*
27 ?expression: equals_expr
28 | time_in_expr
29 | field_in_expr
30
31 equals_expr: field "=" value
32 time_in_expr: time "in" date_range
33 field_in_expr: field "in" "[" orderable "," orderable "]"
34
35 field: FIELD
36 time: TIME
37
38 ?orderable: INT -> integer
39 | SIGNED_NUMBER -> number
40
41 ?value: INT -> integer
42 | SIGNED_NUMBER -> number
43 | ESCAPED_STRING -> string
44 | SIMPLE_STRING -> simple_string
45 | URL_STRING -> url_string
46
47
48 ?date_range: date -> single_date
49 | "[" date "," date "]" -> date_pair
50
51 date: YEAR ["-" MONTH ["-" DAY ]]
52
53 TIME: "time"
54 FIELD: /[a-zA-Z][\w\d_]*/
55 YEAR: DIGIT ~ 4
56 MONTH: DIGIT ~ 1..2
57 DAY: DIGIT ~ 1..2
58 SIMPLE_STRING: /[a-zA-Z][\w._-]*/
59 URL_STRING: /[a-z0-9+.-]+:\/\/([:\/\w._-])*/
60
61
62 %import common.ESCAPED_STRING
63 %import common.SIGNED_NUMBER
64 %import common.INT
65 %import common.DIGIT
66 %import common.CNAME
67 %import common.WS
68 %ignore WS
69 """
70
71
72 def identity(x):
73 return x
74
75
76 @v_args(inline=True)
77 class TreeToSearchExprs(Transformer):
78 # Convert the expressions
79 def equals_expr(self, field, value):
80 return {str(field): value}
81
82 def field_in_expr(self, field, lower, upper):
83 return {str(field): Range(lower, upper)}
84
85 def time_in_expr(self, time_field, date_range):
86 return {str(time_field): date_range}
87
88 # Convert the literals
89 def string(self, val):
90 return str(val[1:-1])
91
92 simple_string = url_string = field = time = str
93 number = float
94 integer = int
95 value = identity
96
97 def single_date(self, date):
98 return _time_to_search_dims(date)
99
100 def date_pair(self, start, end):
101 return _time_to_search_dims((start, end))
102
103 def date(self, y, m=None, d=None):
104 return "-".join(x for x in [y, m, d] if x is not None)
105
106 # Merge everything into a single dict
107 def start(self, *search_exprs):
108 combined = {}
109 for expr in search_exprs:
110 combined.update(expr)
111 return combined
112
113
114 def parse_expressions(*expression_text):
115 expr_parser = Lark(search_grammar)
116 tree = expr_parser.parse(' '.join(expression_text))
117 return TreeToSearchExprs().transform(tree)
118
119
120 def main():
121 expr_parser = Lark(search_grammar)
122
123 sample_inputs = """platform = "LANDSAT_8"
124 platform = "LAND SAT_8"
125 platform = 4
126 lat in [4, 6]
127 time in [2014, 2014]
128 time in [2014-03-01, 2014-04-01]
129 time in 2014-03-02
130 time in 2014-3-2
131 time in 2014-3
132 time in 2014
133 platform = LANDSAT_8
134 lat in [4, 6] time in 2014-03-02
135 platform=LS8 lat in [-14, -23.5] instrument="OTHER"
136 """.strip().split('\n')
137
138 for sample in sample_inputs:
139 transformer = TreeToSearchExprs()
140 tree = expr_parser.parse(sample)
141
142 print(sample)
143 print(tree)
144 print(transformer.transform(tree))
145 print()
146
147
148 if __name__ == '__main__':
149 main()
150
[end of datacube/ui/expression.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/datacube/ui/expression.py b/datacube/ui/expression.py
--- a/datacube/ui/expression.py
+++ b/datacube/ui/expression.py
@@ -43,7 +43,7 @@
| ESCAPED_STRING -> string
| SIMPLE_STRING -> simple_string
| URL_STRING -> url_string
-
+ | UUID -> simple_string
?date_range: date -> single_date
| "[" date "," date "]" -> date_pair
@@ -55,14 +55,16 @@
YEAR: DIGIT ~ 4
MONTH: DIGIT ~ 1..2
DAY: DIGIT ~ 1..2
- SIMPLE_STRING: /[a-zA-Z][\w._-]*/
+ SIMPLE_STRING: /[a-zA-Z][\w._-]*/ | /[0-9]+[\w_-][\w._-]*/
URL_STRING: /[a-z0-9+.-]+:\/\/([:\/\w._-])*/
+ UUID: HEXDIGIT~8 "-" HEXDIGIT~4 "-" HEXDIGIT~4 "-" HEXDIGIT~4 "-" HEXDIGIT~12
%import common.ESCAPED_STRING
%import common.SIGNED_NUMBER
%import common.INT
%import common.DIGIT
+ %import common.HEXDIGIT
%import common.CNAME
%import common.WS
%ignore WS
| {"golden_diff": "diff --git a/datacube/ui/expression.py b/datacube/ui/expression.py\n--- a/datacube/ui/expression.py\n+++ b/datacube/ui/expression.py\n@@ -43,7 +43,7 @@\n | ESCAPED_STRING -> string\n | SIMPLE_STRING -> simple_string\n | URL_STRING -> url_string\n-\n+ | UUID -> simple_string\n \n ?date_range: date -> single_date\n | \"[\" date \",\" date \"]\" -> date_pair\n@@ -55,14 +55,16 @@\n YEAR: DIGIT ~ 4\n MONTH: DIGIT ~ 1..2\n DAY: DIGIT ~ 1..2\n- SIMPLE_STRING: /[a-zA-Z][\\w._-]*/\n+ SIMPLE_STRING: /[a-zA-Z][\\w._-]*/ | /[0-9]+[\\w_-][\\w._-]*/\n URL_STRING: /[a-z0-9+.-]+:\\/\\/([:\\/\\w._-])*/\n+ UUID: HEXDIGIT~8 \"-\" HEXDIGIT~4 \"-\" HEXDIGIT~4 \"-\" HEXDIGIT~4 \"-\" HEXDIGIT~12\n \n \n %import common.ESCAPED_STRING\n %import common.SIGNED_NUMBER\n %import common.INT\n %import common.DIGIT\n+ %import common.HEXDIGIT\n %import common.CNAME\n %import common.WS\n %ignore WS\n", "issue": "Searching for UUIDs from the command line\n### Expected behaviour\r\nI would like to be able to search for UUIDs from the command line, e.g.:\r\n\r\n datacube dataset search id = 26931d17-7a4e-4b55-98e7-d6777fb61df6\r\n\r\n### Actual behaviour\r\nIf I try to run this command, `datacube` crashes with the following Exception:\r\n\r\n```\r\n File \"/g/data/v10/public/modules/dea/20200828/lib/python3.6/site-packages/datacube/ui/click.py\", line 356, in my_parse\r\n return parse_expressions(*list(value))\r\n File \"/g/data/v10/public/modules/dea/20200828/lib/python3.6/site-packages/datacube/ui/expression.py\", line 112, in parse_expressions\r\n tree = expr_parser.parse(' '.join(expression_text))\r\n File \"/g/data/v10/public/modules/dea-env/20200713/lib/python3.6/site-packages/lark/lark.py\", line 391, in parse\r\n return self.parser.parse(text, start=start)\r\n File \"/g/data/v10/public/modules/dea-env/20200713/lib/python3.6/site-packages/lark/parser_frontends.py\", line 199, in parse\r\n return self._parse(text, start)\r\n File \"/g/data/v10/public/modules/dea-env/20200713/lib/python3.6/site-packages/lark/parser_frontends.py\", line 53, in _parse\r\n return self.parser.parse(input, start, *args)\r\n File \"/g/data/v10/public/modules/dea-env/20200713/lib/python3.6/site-packages/lark/parsers/earley.py\", line 293, in parse\r\n to_scan = self._parse(stream, columns, to_scan, start_symbol)\r\n File \"/g/data/v10/public/modules/dea-env/20200713/lib/python3.6/site-packages/lark/parsers/xearley.py\", line 138, in _parse\r\n to_scan = scan(i, to_scan)\r\n File \"/g/data/v10/public/modules/dea-env/20200713/lib/python3.6/site-packages/lark/parsers/xearley.py\", line 115, in scan\r\n raise UnexpectedCharacters(stream, i, text_line, text_column, {item.expect.name for item in to_scan}, set(to_scan))\r\nlark.exceptions.UnexpectedCharacters: No terminal defined for '-' at line 1 col 14\r\n\r\nid = 26931d17-7a4e-4b55-98e7-d6777fb61df6\r\n ^\r\n\r\nExpecting: {'EQUAL', 'IN'}\r\n```\r\n\r\n### Cause\r\nThis happens because the search expression parser is treating the UUID as a number instead of a string. It's possible to work around this by surrounding the UUID in quotes. e.g.:\r\n\r\n datacube dataset search id = \\\"26931d17-7a4e-4b55-98e7-d6777fb61df6\\\"\r\n\r\n\r\n### Intended changes\r\n\r\nIt should be possible to improve the parser to recognise UUIDs, to avoid having to manually quote them from the shell. The search expression 'language' was intended to be shell escaping friendly, so this seems like an obvious improvement. 
\r\n\r\n### Environment information\r\n\r\n* Which ``datacube --version`` are you using?\r\n`Open Data Cube core, version 1.8.3`\r\n\r\n\n", "before_files": [{"content": "# This file is part of the Open Data Cube, see https://opendatacube.org for more information\n#\n# Copyright (c) 2015-2020 ODC Contributors\n# SPDX-License-Identifier: Apache-2.0\n\"\"\"\nSearch expression parsing for command line applications.\n\nThree types of expressions are available:\n\n FIELD = VALUE\n FIELD in DATE-RANGE\n FIELD in [START, END]\n\nWhere DATE-RANGE is one of YYYY, YYYY-MM or YYYY-MM-DD\nand START, END are either numbers or dates.\n\"\"\"\n# flake8: noqa\n\nfrom lark import Lark, v_args, Transformer\n\nfrom datacube.api.query import _time_to_search_dims\nfrom datacube.model import Range\n\n\nsearch_grammar = r\"\"\"\n start: expression*\n ?expression: equals_expr\n | time_in_expr\n | field_in_expr\n\n equals_expr: field \"=\" value\n time_in_expr: time \"in\" date_range\n field_in_expr: field \"in\" \"[\" orderable \",\" orderable \"]\"\n\n field: FIELD\n time: TIME\n\n ?orderable: INT -> integer\n | SIGNED_NUMBER -> number\n\n ?value: INT -> integer\n | SIGNED_NUMBER -> number\n | ESCAPED_STRING -> string\n | SIMPLE_STRING -> simple_string\n | URL_STRING -> url_string\n\n\n ?date_range: date -> single_date\n | \"[\" date \",\" date \"]\" -> date_pair\n\n date: YEAR [\"-\" MONTH [\"-\" DAY ]]\n\n TIME: \"time\"\n FIELD: /[a-zA-Z][\\w\\d_]*/\n YEAR: DIGIT ~ 4\n MONTH: DIGIT ~ 1..2\n DAY: DIGIT ~ 1..2\n SIMPLE_STRING: /[a-zA-Z][\\w._-]*/\n URL_STRING: /[a-z0-9+.-]+:\\/\\/([:\\/\\w._-])*/\n\n\n %import common.ESCAPED_STRING\n %import common.SIGNED_NUMBER\n %import common.INT\n %import common.DIGIT\n %import common.CNAME\n %import common.WS\n %ignore WS\n\"\"\"\n\n\ndef identity(x):\n return x\n\n\n@v_args(inline=True)\nclass TreeToSearchExprs(Transformer):\n # Convert the expressions\n def equals_expr(self, field, value):\n return {str(field): value}\n\n def field_in_expr(self, field, lower, upper):\n return {str(field): Range(lower, upper)}\n\n def time_in_expr(self, time_field, date_range):\n return {str(time_field): date_range}\n\n # Convert the literals\n def string(self, val):\n return str(val[1:-1])\n\n simple_string = url_string = field = time = str\n number = float\n integer = int\n value = identity\n\n def single_date(self, date):\n return _time_to_search_dims(date)\n\n def date_pair(self, start, end):\n return _time_to_search_dims((start, end))\n\n def date(self, y, m=None, d=None):\n return \"-\".join(x for x in [y, m, d] if x is not None)\n\n # Merge everything into a single dict\n def start(self, *search_exprs):\n combined = {}\n for expr in search_exprs:\n combined.update(expr)\n return combined\n\n\ndef parse_expressions(*expression_text):\n expr_parser = Lark(search_grammar)\n tree = expr_parser.parse(' '.join(expression_text))\n return TreeToSearchExprs().transform(tree)\n\n\ndef main():\n expr_parser = Lark(search_grammar)\n\n sample_inputs = \"\"\"platform = \"LANDSAT_8\"\n platform = \"LAND SAT_8\"\n platform = 4\n lat in [4, 6]\n time in [2014, 2014]\n time in [2014-03-01, 2014-04-01]\n time in 2014-03-02\n time in 2014-3-2\n time in 2014-3\n time in 2014\n platform = LANDSAT_8\n lat in [4, 6] time in 2014-03-02\n platform=LS8 lat in [-14, -23.5] instrument=\"OTHER\"\n \"\"\".strip().split('\\n')\n\n for sample in sample_inputs:\n transformer = TreeToSearchExprs()\n tree = expr_parser.parse(sample)\n\n print(sample)\n print(tree)\n print(transformer.transform(tree))\n 
print()\n\n\nif __name__ == '__main__':\n main()\n", "path": "datacube/ui/expression.py"}]} | 2,745 | 316 |
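Editor's note on the row above (an aside, not part of the dataset): the golden diff teaches the Lark grammar a `UUID` terminal (8-4-4-4-12 hex digits, routed to `simple_string`) and widens `SIMPLE_STRING` so values that start with digits are not forced through the number rule. The snippet below is not the datacube parser; it is only a regex restatement of the shape that terminal matches, using the UUID from the issue.

```python
# Illustrative only: the 8-4-4-4-12 hex-digit shape encoded by the new UUID terminal.
import re

UUID_RE = re.compile(
    r"^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}"
    r"-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$"
)

print(bool(UUID_RE.match("26931d17-7a4e-4b55-98e7-d6777fb61df6")))  # True
print(bool(UUID_RE.match("2014-03-01")))                            # False: a date, not a UUID
```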
gh_patches_debug_3649 | rasdani/github-patches | git_diff | python-poetry__poetry-604 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Building sdist is slow with wide exclude pattern
<!--
Hi there! Thank you for wanting to make Poetry better.
Before you submit this; let's make sure of a few things.
Please make sure the following boxes are ticked if they are correct.
If not, please try and fulfill these first.
-->
<!-- Checked checkbox should look like this: [x] -->
- [x] I have searched the [issues](https://github.com/sdispater/poetry/issues) of this repo and believe that this is not a duplicate.
## Issue
I'm working on a project with the following `pyproject.toml`: https://gist.github.com/Lothiraldan/2c0b5ce0171e8450490e3b493e7c2960 and I want to ship a React project inside my package.
Include and exclude are working great, far easier to configure than `MANIFEST.IN` IMHO.
My issue is that the builder code is walking all of the directories and for each file check that it's not in the excluded list. One of my exclude pattern is `"balto/web_interfaces/balto_react/node_modules/**/*"` which generates a lot of matching files. The length of the excluded file list is `28761` in my case.
This makes the following line https://github.com/sdispater/poetry/blob/master/poetry/masonry/builders/sdist.py#L281 quite slow. A build takes about 4 minutes on my laptop.
Here is a `py-spy` dump of the process:
```
Collecting samples from 'pid: 31302' (python v3.6.6)
Total Samples 5100
GIL: 0.00%, Active: 95.50%, Threads: 1
%Own %Total OwnTime TotalTime Function (filename:line)
38.00% 51.00% 10.55s 14.14s __eq__ (4/python3.6/pathlib.py:736)
29.00% 93.00% 6.64s 24.87s <listcomp> (/home/lothiraldan/.local/pipx/venvs/poetry/lib64/python3.6/site-packages/poetry/masonry/builders/sdist.py:281)
16.50% 16.50% 4.38s 4.38s _cparts (4/python3.6/pathlib.py:728)
7.50% 11.00% 2.81s 3.65s __eq__ (4/python3.6/pathlib.py:734)
1.50% 1.50% 0.015s 0.015s run (/home/lothiraldan/.local/pipx/venvs/poetry/lib64/python3.6/site-packages/cleo/application.py:104)
1.00% 1.00% 0.130s 0.130s _cparts (4/python3.6/pathlib.py:724)
1.00% 2.00% 0.315s 0.435s __eq__ (4/python3.6/pathlib.py:733)
0.50% 0.50% 0.025s 0.035s parse_parts (4/python3.6/pathlib.py:87)
0.50% 0.50% 0.165s 0.180s wrapped (4/python3.6/pathlib.py:387)
0.00% 1.00% 0.000s 0.355s find_packages (/home/lothiraldan/.local/pipx/venvs/poetry/lib64/python3.6/site-packages/poetry/masonry/builders/sdist.py:277)
0.00% 0.00% 0.030s 0.030s _get_sep (4/python3.6/posixpath.py:45)
0.00% 94.00% 0.000s 25.36s execute (/home/lothiraldan/.local/pipx/venvs/poetry/lib64/python3.6/site-packages/cleo/commands/command.py:107)
0.00% 0.00% 0.010s 0.075s <listcomp> (/home/lothiraldan/.local/pipx/venvs/poetry/lib64/python3.6/site-packages/poetry/masonry/builders/sdist.py:276)
0.00% 0.00% 0.025s 0.025s _select_from (4/python3.6/pathlib.py:529)
0.00% 94.00% 0.000s 25.36s do_run (/home/lothiraldan/.local/pipx/venvs/poetry/lib64/python3.6/site-packages/cleo/application.py:197)
0.00% 94.00% 0.000s 25.36s build (/home/lothiraldan/.local/pipx/venvs/poetry/lib64/python3.6/site-packages/poetry/masonry/builder.py:21)
0.00% 94.00% 0.000s 25.36s do_run (/home/lothiraldan/.local/pipx/venvs/poetry/lib64/python3.6/site-packages/poetry/console/application.py:88)
Press Control-C to quit, or ? for help.
```
I have some ideas about how to make it faster, I will send some patches if that's okay.
</issue>
<code>
[start of poetry/masonry/builders/builder.py]
1 # -*- coding: utf-8 -*-
2 import os
3 import re
4 import shutil
5 import tempfile
6
7 from collections import defaultdict
8 from contextlib import contextmanager
9
10 from poetry.utils._compat import Path
11 from poetry.vcs import get_vcs
12
13 from ..metadata import Metadata
14 from ..utils.module import Module
15 from ..utils.package_include import PackageInclude
16
17
18 AUTHOR_REGEX = re.compile(r"(?u)^(?P<name>[- .,\w\d'’\"()]+) <(?P<email>.+?)>$")
19
20
21 class Builder(object):
22
23 AVAILABLE_PYTHONS = {"2", "2.7", "3", "3.4", "3.5", "3.6", "3.7"}
24
25 def __init__(self, poetry, env, io):
26 self._poetry = poetry
27 self._env = env
28 self._io = io
29 self._package = poetry.package
30 self._path = poetry.file.parent
31 self._module = Module(
32 self._package.name,
33 self._path.as_posix(),
34 packages=self._package.packages,
35 includes=self._package.include,
36 )
37 self._meta = Metadata.from_package(self._package)
38
39 def build(self):
40 raise NotImplementedError()
41
42 def find_excluded_files(self): # type: () -> list
43 # Checking VCS
44 vcs = get_vcs(self._path)
45 if not vcs:
46 vcs_ignored_files = []
47 else:
48 vcs_ignored_files = vcs.get_ignored_files()
49
50 explicitely_excluded = []
51 for excluded_glob in self._package.exclude:
52 for excluded in self._path.glob(excluded_glob):
53 explicitely_excluded.append(excluded)
54
55 ignored = vcs_ignored_files + explicitely_excluded
56 result = []
57 for file in ignored:
58 try:
59 file = Path(file).absolute().relative_to(self._path)
60 except ValueError:
61 # Should only happen in tests
62 continue
63
64 result.append(file)
65
66 return result
67
68 def find_files_to_add(self, exclude_build=True): # type: () -> list
69 """
70 Finds all files to add to the tarball
71 """
72 excluded = self.find_excluded_files()
73 to_add = []
74
75 for include in self._module.includes:
76 for file in include.elements:
77 if "__pycache__" in str(file):
78 continue
79
80 if file.is_dir():
81 continue
82
83 file = file.relative_to(self._path)
84
85 if file in excluded and isinstance(include, PackageInclude):
86 continue
87
88 if file.suffix == ".pyc":
89 continue
90
91 if file in to_add:
92 # Skip duplicates
93 continue
94
95 self._io.writeln(
96 " - Adding: <comment>{}</comment>".format(str(file)),
97 verbosity=self._io.VERBOSITY_VERY_VERBOSE,
98 )
99 to_add.append(file)
100
101 # Include project files
102 self._io.writeln(
103 " - Adding: <comment>pyproject.toml</comment>",
104 verbosity=self._io.VERBOSITY_VERY_VERBOSE,
105 )
106 to_add.append(Path("pyproject.toml"))
107
108 # If a license file exists, add it
109 for license_file in self._path.glob("LICENSE*"):
110 self._io.writeln(
111 " - Adding: <comment>{}</comment>".format(
112 license_file.relative_to(self._path)
113 ),
114 verbosity=self._io.VERBOSITY_VERY_VERBOSE,
115 )
116 to_add.append(license_file.relative_to(self._path))
117
118 # If a README is specificed we need to include it
119 # to avoid errors
120 if "readme" in self._poetry.local_config:
121 readme = self._path / self._poetry.local_config["readme"]
122 if readme.exists():
123 self._io.writeln(
124 " - Adding: <comment>{}</comment>".format(
125 readme.relative_to(self._path)
126 ),
127 verbosity=self._io.VERBOSITY_VERY_VERBOSE,
128 )
129 to_add.append(readme.relative_to(self._path))
130
131 # If a build script is specified and explicitely required
132 # we add it to the list of files
133 if self._package.build and not exclude_build:
134 to_add.append(Path(self._package.build))
135
136 return sorted(to_add)
137
138 def convert_entry_points(self): # type: () -> dict
139 result = defaultdict(list)
140
141 # Scripts -> Entry points
142 for name, ep in self._poetry.local_config.get("scripts", {}).items():
143 extras = ""
144 if isinstance(ep, dict):
145 extras = "[{}]".format(", ".join(ep["extras"]))
146 ep = ep["callable"]
147
148 result["console_scripts"].append("{} = {}{}".format(name, ep, extras))
149
150 # Plugins -> entry points
151 plugins = self._poetry.local_config.get("plugins", {})
152 for groupname, group in plugins.items():
153 for name, ep in sorted(group.items()):
154 result[groupname].append("{} = {}".format(name, ep))
155
156 for groupname in result:
157 result[groupname] = sorted(result[groupname])
158
159 return dict(result)
160
161 @classmethod
162 def convert_author(cls, author): # type: () -> dict
163 m = AUTHOR_REGEX.match(author)
164
165 name = m.group("name")
166 email = m.group("email")
167
168 return {"name": name, "email": email}
169
170 @classmethod
171 @contextmanager
172 def temporary_directory(cls, *args, **kwargs):
173 try:
174 from tempfile import TemporaryDirectory
175
176 with TemporaryDirectory(*args, **kwargs) as name:
177 yield name
178 except ImportError:
179 name = tempfile.mkdtemp(*args, **kwargs)
180
181 yield name
182
183 shutil.rmtree(name)
184
[end of poetry/masonry/builders/builder.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/poetry/masonry/builders/builder.py b/poetry/masonry/builders/builder.py
--- a/poetry/masonry/builders/builder.py
+++ b/poetry/masonry/builders/builder.py
@@ -63,7 +63,10 @@
result.append(file)
- return result
+ # The list of excluded files might be big and we will do a lot
+ # containment check (x in excluded).
+ # Returning a set make those tests much much faster.
+ return set(result)
def find_files_to_add(self, exclude_build=True): # type: () -> list
"""
| {"golden_diff": "diff --git a/poetry/masonry/builders/builder.py b/poetry/masonry/builders/builder.py\n--- a/poetry/masonry/builders/builder.py\n+++ b/poetry/masonry/builders/builder.py\n@@ -63,7 +63,10 @@\n \n result.append(file)\n \n- return result\n+ # The list of excluded files might be big and we will do a lot\n+ # containment check (x in excluded).\n+ # Returning a set make those tests much much faster.\n+ return set(result)\n \n def find_files_to_add(self, exclude_build=True): # type: () -> list\n \"\"\"\n", "issue": "Building sdist is slow with wide exclude pattern\n<!--\r\n Hi there! Thank you for wanting to make Poetry better.\r\n\r\n Before you submit this; let's make sure of a few things.\r\n Please make sure the following boxes are ticked if they are correct.\r\n If not, please try and fulfill these first.\r\n-->\r\n\r\n<!-- Checked checkbox should look like this: [x] -->\r\n- [x] I have searched the [issues](https://github.com/sdispater/poetry/issues) of this repo and believe that this is not a duplicate.\r\n\r\n## Issue\r\n\r\nI'm working on a project with the following `pyproject.toml`: https://gist.github.com/Lothiraldan/2c0b5ce0171e8450490e3b493e7c2960 and I want to ship a React project inside my package.\r\n\r\nInclude and exclude are working great, far easier to configure than `MANIFEST.IN` IMHO.\r\n\r\nMy issue is that the builder code is walking all of the directories and for each file check that it's not in the excluded list. One of my exclude pattern is `\"balto/web_interfaces/balto_react/node_modules/**/*\"` which generates a lot of matching files. The length of the excluded file list is `28761` in my case.\r\n\r\nThis makes the following line https://github.com/sdispater/poetry/blob/master/poetry/masonry/builders/sdist.py#L281 quite slow. 
A build takes about 4 minutes on my laptop.\r\n\r\nHere is a `py-spy` dump of the process:\r\n```\r\nCollecting samples from 'pid: 31302' (python v3.6.6)\r\nTotal Samples 5100\r\nGIL: 0.00%, Active: 95.50%, Threads: 1\r\n\r\n %Own %Total OwnTime TotalTime Function (filename:line) \r\n 38.00% 51.00% 10.55s 14.14s __eq__ (4/python3.6/pathlib.py:736)\r\n 29.00% 93.00% 6.64s 24.87s <listcomp> (/home/lothiraldan/.local/pipx/venvs/poetry/lib64/python3.6/site-packages/poetry/masonry/builders/sdist.py:281)\r\n 16.50% 16.50% 4.38s 4.38s _cparts (4/python3.6/pathlib.py:728)\r\n 7.50% 11.00% 2.81s 3.65s __eq__ (4/python3.6/pathlib.py:734)\r\n 1.50% 1.50% 0.015s 0.015s run (/home/lothiraldan/.local/pipx/venvs/poetry/lib64/python3.6/site-packages/cleo/application.py:104)\r\n 1.00% 1.00% 0.130s 0.130s _cparts (4/python3.6/pathlib.py:724)\r\n 1.00% 2.00% 0.315s 0.435s __eq__ (4/python3.6/pathlib.py:733)\r\n 0.50% 0.50% 0.025s 0.035s parse_parts (4/python3.6/pathlib.py:87)\r\n 0.50% 0.50% 0.165s 0.180s wrapped (4/python3.6/pathlib.py:387)\r\n 0.00% 1.00% 0.000s 0.355s find_packages (/home/lothiraldan/.local/pipx/venvs/poetry/lib64/python3.6/site-packages/poetry/masonry/builders/sdist.py:277)\r\n 0.00% 0.00% 0.030s 0.030s _get_sep (4/python3.6/posixpath.py:45)\r\n 0.00% 94.00% 0.000s 25.36s execute (/home/lothiraldan/.local/pipx/venvs/poetry/lib64/python3.6/site-packages/cleo/commands/command.py:107)\r\n 0.00% 0.00% 0.010s 0.075s <listcomp> (/home/lothiraldan/.local/pipx/venvs/poetry/lib64/python3.6/site-packages/poetry/masonry/builders/sdist.py:276)\r\n 0.00% 0.00% 0.025s 0.025s _select_from (4/python3.6/pathlib.py:529)\r\n 0.00% 94.00% 0.000s 25.36s do_run (/home/lothiraldan/.local/pipx/venvs/poetry/lib64/python3.6/site-packages/cleo/application.py:197)\r\n 0.00% 94.00% 0.000s 25.36s build (/home/lothiraldan/.local/pipx/venvs/poetry/lib64/python3.6/site-packages/poetry/masonry/builder.py:21)\r\n 0.00% 94.00% 0.000s 25.36s do_run (/home/lothiraldan/.local/pipx/venvs/poetry/lib64/python3.6/site-packages/poetry/console/application.py:88)\r\n\r\nPress Control-C to quit, or ? 
for help.\r\n```\r\n\r\nI have some ideas about how to make it faster, I will send some patches if that's okay.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport os\nimport re\nimport shutil\nimport tempfile\n\nfrom collections import defaultdict\nfrom contextlib import contextmanager\n\nfrom poetry.utils._compat import Path\nfrom poetry.vcs import get_vcs\n\nfrom ..metadata import Metadata\nfrom ..utils.module import Module\nfrom ..utils.package_include import PackageInclude\n\n\nAUTHOR_REGEX = re.compile(r\"(?u)^(?P<name>[- .,\\w\\d'\u2019\\\"()]+) <(?P<email>.+?)>$\")\n\n\nclass Builder(object):\n\n AVAILABLE_PYTHONS = {\"2\", \"2.7\", \"3\", \"3.4\", \"3.5\", \"3.6\", \"3.7\"}\n\n def __init__(self, poetry, env, io):\n self._poetry = poetry\n self._env = env\n self._io = io\n self._package = poetry.package\n self._path = poetry.file.parent\n self._module = Module(\n self._package.name,\n self._path.as_posix(),\n packages=self._package.packages,\n includes=self._package.include,\n )\n self._meta = Metadata.from_package(self._package)\n\n def build(self):\n raise NotImplementedError()\n\n def find_excluded_files(self): # type: () -> list\n # Checking VCS\n vcs = get_vcs(self._path)\n if not vcs:\n vcs_ignored_files = []\n else:\n vcs_ignored_files = vcs.get_ignored_files()\n\n explicitely_excluded = []\n for excluded_glob in self._package.exclude:\n for excluded in self._path.glob(excluded_glob):\n explicitely_excluded.append(excluded)\n\n ignored = vcs_ignored_files + explicitely_excluded\n result = []\n for file in ignored:\n try:\n file = Path(file).absolute().relative_to(self._path)\n except ValueError:\n # Should only happen in tests\n continue\n\n result.append(file)\n\n return result\n\n def find_files_to_add(self, exclude_build=True): # type: () -> list\n \"\"\"\n Finds all files to add to the tarball\n \"\"\"\n excluded = self.find_excluded_files()\n to_add = []\n\n for include in self._module.includes:\n for file in include.elements:\n if \"__pycache__\" in str(file):\n continue\n\n if file.is_dir():\n continue\n\n file = file.relative_to(self._path)\n\n if file in excluded and isinstance(include, PackageInclude):\n continue\n\n if file.suffix == \".pyc\":\n continue\n\n if file in to_add:\n # Skip duplicates\n continue\n\n self._io.writeln(\n \" - Adding: <comment>{}</comment>\".format(str(file)),\n verbosity=self._io.VERBOSITY_VERY_VERBOSE,\n )\n to_add.append(file)\n\n # Include project files\n self._io.writeln(\n \" - Adding: <comment>pyproject.toml</comment>\",\n verbosity=self._io.VERBOSITY_VERY_VERBOSE,\n )\n to_add.append(Path(\"pyproject.toml\"))\n\n # If a license file exists, add it\n for license_file in self._path.glob(\"LICENSE*\"):\n self._io.writeln(\n \" - Adding: <comment>{}</comment>\".format(\n license_file.relative_to(self._path)\n ),\n verbosity=self._io.VERBOSITY_VERY_VERBOSE,\n )\n to_add.append(license_file.relative_to(self._path))\n\n # If a README is specificed we need to include it\n # to avoid errors\n if \"readme\" in self._poetry.local_config:\n readme = self._path / self._poetry.local_config[\"readme\"]\n if readme.exists():\n self._io.writeln(\n \" - Adding: <comment>{}</comment>\".format(\n readme.relative_to(self._path)\n ),\n verbosity=self._io.VERBOSITY_VERY_VERBOSE,\n )\n to_add.append(readme.relative_to(self._path))\n\n # If a build script is specified and explicitely required\n # we add it to the list of files\n if self._package.build and not exclude_build:\n to_add.append(Path(self._package.build))\n\n return 
sorted(to_add)\n\n def convert_entry_points(self): # type: () -> dict\n result = defaultdict(list)\n\n # Scripts -> Entry points\n for name, ep in self._poetry.local_config.get(\"scripts\", {}).items():\n extras = \"\"\n if isinstance(ep, dict):\n extras = \"[{}]\".format(\", \".join(ep[\"extras\"]))\n ep = ep[\"callable\"]\n\n result[\"console_scripts\"].append(\"{} = {}{}\".format(name, ep, extras))\n\n # Plugins -> entry points\n plugins = self._poetry.local_config.get(\"plugins\", {})\n for groupname, group in plugins.items():\n for name, ep in sorted(group.items()):\n result[groupname].append(\"{} = {}\".format(name, ep))\n\n for groupname in result:\n result[groupname] = sorted(result[groupname])\n\n return dict(result)\n\n @classmethod\n def convert_author(cls, author): # type: () -> dict\n m = AUTHOR_REGEX.match(author)\n\n name = m.group(\"name\")\n email = m.group(\"email\")\n\n return {\"name\": name, \"email\": email}\n\n @classmethod\n @contextmanager\n def temporary_directory(cls, *args, **kwargs):\n try:\n from tempfile import TemporaryDirectory\n\n with TemporaryDirectory(*args, **kwargs) as name:\n yield name\n except ImportError:\n name = tempfile.mkdtemp(*args, **kwargs)\n\n yield name\n\n shutil.rmtree(name)\n", "path": "poetry/masonry/builders/builder.py"}]} | 3,703 | 147 |
gh_patches_debug_32578 | rasdani/github-patches | git_diff | elastic__apm-agent-python-1566 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Tornado <6.0 still tries to instrument (and fails with cryptic warning)
Flower version is flower==0.9.3
elastic-apm==6.7.2
The Flower service is not working with elastic-apm==6.7.2.
Can you please suggest which elastic-apm version is compatible with flower==0.9.3?
</issue>
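Editor's note (an aside, not part of the quoted issue or the prompt): Flower 0.9.3 pulls in a pre-6.0 Tornado, while the agent only supports Tornado 6.0+ (as the log message in the eventual fix, visible later in this row, states). The snippet below is only a standalone sketch of gating instrumentation on the installed version, not the agent's actual code.

```python
# Standalone sketch of version-gated instrumentation; illustrative only.
def should_instrument_tornado():
    try:
        import tornado
    except ImportError:
        return False  # nothing to instrument
    # tornado.version_info is a tuple such as (5, 1, 1, 0) or (6, 2, 0, 0)
    return tornado.version_info[0] >= 6
```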
<code>
[start of elasticapm/instrumentation/packages/tornado.py]
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2019, Elasticsearch BV
4 # All rights reserved.
5 #
6 # Redistribution and use in source and binary forms, with or without
7 # modification, are permitted provided that the following conditions are met:
8 #
9 # * Redistributions of source code must retain the above copyright notice, this
10 # list of conditions and the following disclaimer.
11 #
12 # * Redistributions in binary form must reproduce the above copyright notice,
13 # this list of conditions and the following disclaimer in the documentation
14 # and/or other materials provided with the distribution.
15 #
16 # * Neither the name of the copyright holder nor the names of its
17 # contributors may be used to endorse or promote products derived from
18 # this software without specific prior written permission.
19 #
20 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 """
31 Instrumentation for Tornado
32 """
33 import elasticapm
34 from elasticapm.conf import constants
35 from elasticapm.instrumentation.packages.asyncio.base import AbstractInstrumentedModule, AsyncAbstractInstrumentedModule
36 from elasticapm.traces import capture_span
37 from elasticapm.utils.disttracing import TraceParent
38
39
40 class TornadoRequestExecuteInstrumentation(AsyncAbstractInstrumentedModule):
41 name = "tornado_request_execute"
42 creates_transactions = True
43 instrument_list = [("tornado.web", "RequestHandler._execute")]
44
45 async def call(self, module, method, wrapped, instance, args, kwargs):
46 if not hasattr(instance.application, "elasticapm_client"):
47 # If tornado was instrumented but not as the main framework
48 # (i.e. in Flower), we should skip it.
49 return await wrapped(*args, **kwargs)
50
51 # Late import to avoid ImportErrors
52 from elasticapm.contrib.tornado.utils import get_data_from_request, get_data_from_response
53
54 request = instance.request
55 client = instance.application.elasticapm_client
56 should_ignore = client.should_ignore_url(request.path)
57 if not should_ignore:
58 trace_parent = TraceParent.from_headers(request.headers)
59 client.begin_transaction("request", trace_parent=trace_parent)
60 elasticapm.set_context(
61 lambda: get_data_from_request(instance, request, client.config, constants.TRANSACTION), "request"
62 )
63 # TODO: Can we somehow incorporate the routing rule itself here?
64 elasticapm.set_transaction_name("{} {}".format(request.method, type(instance).__name__), override=False)
65
66 ret = await wrapped(*args, **kwargs)
67
68 if not should_ignore:
69 elasticapm.set_context(
70 lambda: get_data_from_response(instance, client.config, constants.TRANSACTION), "response"
71 )
72 status = instance.get_status()
73 result = "HTTP {}xx".format(status // 100)
74 elasticapm.set_transaction_result(result, override=False)
75 elasticapm.set_transaction_outcome(http_status_code=status)
76 client.end_transaction()
77
78 return ret
79
80
81 class TornadoHandleRequestExceptionInstrumentation(AbstractInstrumentedModule):
82 name = "tornado_handle_request_exception"
83
84 instrument_list = [("tornado.web", "RequestHandler._handle_request_exception")]
85
86 def call(self, module, method, wrapped, instance, args, kwargs):
87 if not hasattr(instance.application, "elasticapm_client"):
88 # If tornado was instrumented but not as the main framework
89 # (i.e. in Flower), we should skip it.
90 return wrapped(*args, **kwargs)
91
92 # Late import to avoid ImportErrors
93 from tornado.web import Finish, HTTPError
94
95 from elasticapm.contrib.tornado.utils import get_data_from_request
96
97 e = args[0]
98 if isinstance(e, Finish):
99 # Not an error; Finish is an exception that ends a request without an error response
100 return wrapped(*args, **kwargs)
101
102 client = instance.application.elasticapm_client
103 request = instance.request
104 client.capture_exception(
105 context={"request": get_data_from_request(instance, request, client.config, constants.ERROR)}
106 )
107 elasticapm.set_transaction_outcome(constants.OUTCOME.FAILURE)
108 if isinstance(e, HTTPError):
109 elasticapm.set_transaction_result("HTTP {}xx".format(int(e.status_code / 100)), override=False)
110 elasticapm.set_context({"status_code": e.status_code}, "response")
111 else:
112 elasticapm.set_transaction_result("HTTP 5xx", override=False)
113 elasticapm.set_context({"status_code": 500}, "response")
114
115 return wrapped(*args, **kwargs)
116
117
118 class TornadoRenderInstrumentation(AbstractInstrumentedModule):
119 name = "tornado_render"
120
121 instrument_list = [("tornado.web", "RequestHandler.render")]
122
123 def call(self, module, method, wrapped, instance, args, kwargs):
124 if "template_name" in kwargs:
125 name = kwargs["template_name"]
126 else:
127 name = args[0]
128
129 with capture_span(name, span_type="template", span_subtype="tornado", span_action="render"):
130 return wrapped(*args, **kwargs)
131
[end of elasticapm/instrumentation/packages/tornado.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/elasticapm/instrumentation/packages/tornado.py b/elasticapm/instrumentation/packages/tornado.py
--- a/elasticapm/instrumentation/packages/tornado.py
+++ b/elasticapm/instrumentation/packages/tornado.py
@@ -35,9 +35,25 @@
from elasticapm.instrumentation.packages.asyncio.base import AbstractInstrumentedModule, AsyncAbstractInstrumentedModule
from elasticapm.traces import capture_span
from elasticapm.utils.disttracing import TraceParent
+from elasticapm.utils.logging import get_logger
+logger = get_logger("elasticapm.instrument")
-class TornadoRequestExecuteInstrumentation(AsyncAbstractInstrumentedModule):
+
+class TornadoBaseInstrumentedModule(AbstractInstrumentedModule):
+ def instrument(self):
+ try:
+ import tornado
+
+ if tornado.version_info[0] < 6:
+ logger.debug("Skipping instrumentation of %s. Tornado is only supported with version 6.0+", self.name)
+ return
+ except ImportError:
+ pass
+ super().instrument()
+
+
+class TornadoRequestExecuteInstrumentation(TornadoBaseInstrumentedModule, AsyncAbstractInstrumentedModule):
name = "tornado_request_execute"
creates_transactions = True
instrument_list = [("tornado.web", "RequestHandler._execute")]
@@ -78,7 +94,7 @@
return ret
-class TornadoHandleRequestExceptionInstrumentation(AbstractInstrumentedModule):
+class TornadoHandleRequestExceptionInstrumentation(TornadoBaseInstrumentedModule):
name = "tornado_handle_request_exception"
instrument_list = [("tornado.web", "RequestHandler._handle_request_exception")]
@@ -115,7 +131,7 @@
return wrapped(*args, **kwargs)
-class TornadoRenderInstrumentation(AbstractInstrumentedModule):
+class TornadoRenderInstrumentation(TornadoBaseInstrumentedModule):
name = "tornado_render"
instrument_list = [("tornado.web", "RequestHandler.render")]
| {"golden_diff": "diff --git a/elasticapm/instrumentation/packages/tornado.py b/elasticapm/instrumentation/packages/tornado.py\n--- a/elasticapm/instrumentation/packages/tornado.py\n+++ b/elasticapm/instrumentation/packages/tornado.py\n@@ -35,9 +35,25 @@\n from elasticapm.instrumentation.packages.asyncio.base import AbstractInstrumentedModule, AsyncAbstractInstrumentedModule\n from elasticapm.traces import capture_span\n from elasticapm.utils.disttracing import TraceParent\n+from elasticapm.utils.logging import get_logger\n \n+logger = get_logger(\"elasticapm.instrument\")\n \n-class TornadoRequestExecuteInstrumentation(AsyncAbstractInstrumentedModule):\n+\n+class TornadoBaseInstrumentedModule(AbstractInstrumentedModule):\n+ def instrument(self):\n+ try:\n+ import tornado\n+\n+ if tornado.version_info[0] < 6:\n+ logger.debug(\"Skipping instrumentation of %s. Tornado is only supported with version 6.0+\", self.name)\n+ return\n+ except ImportError:\n+ pass\n+ super().instrument()\n+\n+\n+class TornadoRequestExecuteInstrumentation(TornadoBaseInstrumentedModule, AsyncAbstractInstrumentedModule):\n name = \"tornado_request_execute\"\n creates_transactions = True\n instrument_list = [(\"tornado.web\", \"RequestHandler._execute\")]\n@@ -78,7 +94,7 @@\n return ret\n \n \n-class TornadoHandleRequestExceptionInstrumentation(AbstractInstrumentedModule):\n+class TornadoHandleRequestExceptionInstrumentation(TornadoBaseInstrumentedModule):\n name = \"tornado_handle_request_exception\"\n \n instrument_list = [(\"tornado.web\", \"RequestHandler._handle_request_exception\")]\n@@ -115,7 +131,7 @@\n return wrapped(*args, **kwargs)\n \n \n-class TornadoRenderInstrumentation(AbstractInstrumentedModule):\n+class TornadoRenderInstrumentation(TornadoBaseInstrumentedModule):\n name = \"tornado_render\"\n \n instrument_list = [(\"tornado.web\", \"RequestHandler.render\")]\n", "issue": "Tornado <6.0 still tries to instrument (and fails with cryptic warning)\nFlower version is flower==0.9.3\r\nelastic-apm==6.7.2\r\n\r\nFlower service is not working for elastic-apm==6.7.2. \r\nCan you please suggest which version is compatible for flower==0.9.3.\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\nInstrumentation for Tornado\n\"\"\"\nimport elasticapm\nfrom elasticapm.conf import constants\nfrom elasticapm.instrumentation.packages.asyncio.base import AbstractInstrumentedModule, AsyncAbstractInstrumentedModule\nfrom elasticapm.traces import capture_span\nfrom elasticapm.utils.disttracing import TraceParent\n\n\nclass TornadoRequestExecuteInstrumentation(AsyncAbstractInstrumentedModule):\n name = \"tornado_request_execute\"\n creates_transactions = True\n instrument_list = [(\"tornado.web\", \"RequestHandler._execute\")]\n\n async def call(self, module, method, wrapped, instance, args, kwargs):\n if not hasattr(instance.application, \"elasticapm_client\"):\n # If tornado was instrumented but not as the main framework\n # (i.e. in Flower), we should skip it.\n return await wrapped(*args, **kwargs)\n\n # Late import to avoid ImportErrors\n from elasticapm.contrib.tornado.utils import get_data_from_request, get_data_from_response\n\n request = instance.request\n client = instance.application.elasticapm_client\n should_ignore = client.should_ignore_url(request.path)\n if not should_ignore:\n trace_parent = TraceParent.from_headers(request.headers)\n client.begin_transaction(\"request\", trace_parent=trace_parent)\n elasticapm.set_context(\n lambda: get_data_from_request(instance, request, client.config, constants.TRANSACTION), \"request\"\n )\n # TODO: Can we somehow incorporate the routing rule itself here?\n elasticapm.set_transaction_name(\"{} {}\".format(request.method, type(instance).__name__), override=False)\n\n ret = await wrapped(*args, **kwargs)\n\n if not should_ignore:\n elasticapm.set_context(\n lambda: get_data_from_response(instance, client.config, constants.TRANSACTION), \"response\"\n )\n status = instance.get_status()\n result = \"HTTP {}xx\".format(status // 100)\n elasticapm.set_transaction_result(result, override=False)\n elasticapm.set_transaction_outcome(http_status_code=status)\n client.end_transaction()\n\n return ret\n\n\nclass TornadoHandleRequestExceptionInstrumentation(AbstractInstrumentedModule):\n name = \"tornado_handle_request_exception\"\n\n instrument_list = [(\"tornado.web\", \"RequestHandler._handle_request_exception\")]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n if not hasattr(instance.application, \"elasticapm_client\"):\n # If tornado was instrumented but not as the main framework\n # (i.e. 
in Flower), we should skip it.\n return wrapped(*args, **kwargs)\n\n # Late import to avoid ImportErrors\n from tornado.web import Finish, HTTPError\n\n from elasticapm.contrib.tornado.utils import get_data_from_request\n\n e = args[0]\n if isinstance(e, Finish):\n # Not an error; Finish is an exception that ends a request without an error response\n return wrapped(*args, **kwargs)\n\n client = instance.application.elasticapm_client\n request = instance.request\n client.capture_exception(\n context={\"request\": get_data_from_request(instance, request, client.config, constants.ERROR)}\n )\n elasticapm.set_transaction_outcome(constants.OUTCOME.FAILURE)\n if isinstance(e, HTTPError):\n elasticapm.set_transaction_result(\"HTTP {}xx\".format(int(e.status_code / 100)), override=False)\n elasticapm.set_context({\"status_code\": e.status_code}, \"response\")\n else:\n elasticapm.set_transaction_result(\"HTTP 5xx\", override=False)\n elasticapm.set_context({\"status_code\": 500}, \"response\")\n\n return wrapped(*args, **kwargs)\n\n\nclass TornadoRenderInstrumentation(AbstractInstrumentedModule):\n name = \"tornado_render\"\n\n instrument_list = [(\"tornado.web\", \"RequestHandler.render\")]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n if \"template_name\" in kwargs:\n name = kwargs[\"template_name\"]\n else:\n name = args[0]\n\n with capture_span(name, span_type=\"template\", span_subtype=\"tornado\", span_action=\"render\"):\n return wrapped(*args, **kwargs)\n", "path": "elasticapm/instrumentation/packages/tornado.py"}]} | 2,144 | 438 |