problem_id (stringlengths 18-22) | source (stringclasses 1 value) | task_type (stringclasses 1 value) | in_source_id (stringlengths 13-58) | prompt (stringlengths 1.71k-18.9k) | golden_diff (stringlengths 145-5.13k) | verification_info (stringlengths 465-23.6k) | num_tokens_prompt (int64 556-4.1k) | num_tokens_diff (int64 47-1.02k)
---|---|---|---|---|---|---|---|---|
gh_patches_debug_8445 | rasdani/github-patches | git_diff | zestedesavoir__zds-site-2951 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Username display in the tutorials / articles list is broken
The username "Bat'" is not displayed correctly.

The behaviour can be seen on this page: https://zestedesavoir.com/tutoriels/?tag=dot-net
</issue>
<code>
[start of zds/utils/templatetags/captureas.py]
1 # -*- coding: utf-8 -*-
2
3 from django import template
4
5 register = template.Library()
6
7 """
8 Define a tag allowing to capture template content as a variable.
9 """
10
11
12 @register.tag(name='captureas')
13 def do_captureas(parser, token):
14 """
15 Define a tag allowing to capture template content as a variable.
16
17 :param parser: The django template parser
18 :param token: tag token (tag_name + variable_name)
19 :return: Template node.
20 """
21
22 try:
23 _, variable_name = token.split_contents()
24 except ValueError:
25 raise template.TemplateSyntaxError("'captureas' node requires a variable name.")
26
27 nodelist = parser.parse(('endcaptureas',))
28 parser.delete_first_token()
29
30 return CaptureasNode(nodelist, variable_name)
31
32
33 class CaptureasNode(template.Node):
34 """
35 Capture end render node content to a variable name.
36 """
37
38 def __init__(self, nodelist, variable_name):
39 """
40 Create a template node which render `nodelist` to `variable_name`.
41
42 :param nodelist: The node list to capture.
43 :param variable_name: The variable name which will gain the rendered content.
44 """
45 self.__node_list = nodelist
46 self.__variable_name = variable_name
47
48 def render(self, context):
49 """
50 Render the node list to the variable name.
51
52 :param context: Current context.
53 :return: Empty string
54 :rtype: str
55 """
56 output = self.__node_list.render(context)
57 context[self.__variable_name] = output.strip()
58 return ''
59
[end of zds/utils/templatetags/captureas.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/zds/utils/templatetags/captureas.py b/zds/utils/templatetags/captureas.py
--- a/zds/utils/templatetags/captureas.py
+++ b/zds/utils/templatetags/captureas.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
from django import template
+from django.utils.safestring import mark_safe
register = template.Library()
@@ -54,5 +55,5 @@
:rtype: str
"""
output = self.__node_list.render(context)
- context[self.__variable_name] = output.strip()
+ context[self.__variable_name] = mark_safe(output.strip())
return ''
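
The one-line fix above works because the captured fragment is rendered once inside the tag, but the plain string stored in the context is HTML-escaped a second time when an auto-escaping template re-inserts it, which is presumably what mangles a pseudonym like `Bat'`. Marking the output safe skips that second pass. A minimal sketch of the difference (the markup below is illustrative, not taken from the site):

```python
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe

# Output already rendered by the captured template fragment
captured = '<a href="/membres/Bat\'/">Bat\'</a>'

# Plain str: escaped again on insertion, so the tags and the apostrophe
# show up as literal text in the page.
print(conditional_escape(captured))

# SafeString, as produced by the patched tag: inserted verbatim.
print(conditional_escape(mark_safe(captured)))
```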
| {"golden_diff": "diff --git a/zds/utils/templatetags/captureas.py b/zds/utils/templatetags/captureas.py\n--- a/zds/utils/templatetags/captureas.py\n+++ b/zds/utils/templatetags/captureas.py\n@@ -1,6 +1,7 @@\n # -*- coding: utf-8 -*-\n \n from django import template\n+from django.utils.safestring import mark_safe\n \n register = template.Library()\n \n@@ -54,5 +55,5 @@\n :rtype: str\n \"\"\"\n output = self.__node_list.render(context)\n- context[self.__variable_name] = output.strip()\n+ context[self.__variable_name] = mark_safe(output.strip())\n return ''\n", "issue": "L'affichage des pseudos dans la liste des tutoriels / article d\u00e9conne\nL'affichage du pseudo \"Bat'\" n'est pas correct.\n\n\n\nPossible de voir le comportement sur la page: https://zestedesavoir.com/tutoriels/?tag=dot-net\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom django import template\n\nregister = template.Library()\n\n\"\"\"\nDefine a tag allowing to capture template content as a variable.\n\"\"\"\n\n\[email protected](name='captureas')\ndef do_captureas(parser, token):\n \"\"\"\n Define a tag allowing to capture template content as a variable.\n\n :param parser: The django template parser\n :param token: tag token (tag_name + variable_name)\n :return: Template node.\n \"\"\"\n\n try:\n _, variable_name = token.split_contents()\n except ValueError:\n raise template.TemplateSyntaxError(\"'captureas' node requires a variable name.\")\n\n nodelist = parser.parse(('endcaptureas',))\n parser.delete_first_token()\n\n return CaptureasNode(nodelist, variable_name)\n\n\nclass CaptureasNode(template.Node):\n \"\"\"\n Capture end render node content to a variable name.\n \"\"\"\n\n def __init__(self, nodelist, variable_name):\n \"\"\"\n Create a template node which render `nodelist` to `variable_name`.\n\n :param nodelist: The node list to capture.\n :param variable_name: The variable name which will gain the rendered content.\n \"\"\"\n self.__node_list = nodelist\n self.__variable_name = variable_name\n\n def render(self, context):\n \"\"\"\n Render the node list to the variable name.\n\n :param context: Current context.\n :return: Empty string\n :rtype: str\n \"\"\"\n output = self.__node_list.render(context)\n context[self.__variable_name] = output.strip()\n return ''\n", "path": "zds/utils/templatetags/captureas.py"}]} | 1,117 | 160 |
gh_patches_debug_23627 | rasdani/github-patches | git_diff | internetarchive__openlibrary-5428 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Create `i18n-messages validate` script to check for presence of fuzzy flags
<!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->
### Describe the problem that you'd like solved
<!-- A clear and concise description of what you want to happen. -->
When `i18n-messages update` is executed, some translations in `.po` files will have a `fuzzy` annotation. This means that the translation may not be completely accurate, and should be manually approved by a translator. The last step of the `i18n-messages update` flow compiles all `.po` files into `.mo` files, regardless of the presence of fuzzy translations.
### Proposal & Constraints
<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->
Add a validation step to the i18n flow, between the update and compile steps. The validation step will read each `.po` file, searching for lines that begin with `#, fuzzy`. If any are found in a `.po` file, that file should not be compiled.
<!-- Which suggestions or requirements should be considered for how feature needs to appear or be implemented? -->
### Additional context
<!-- Add any other context or screenshots about the feature request here. -->
This is a sub-task of #5134.
### Stakeholders
<!-- @ tag stakeholders of this bug -->
@cdrini
</issue>
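A minimal sketch of the check the issue asks for, using Babel's `read_po` (the same parser the module below already uses); the locale path is illustrative:

```python
from babel.messages.pofile import read_po

def has_fuzzy_translations(po_path: str) -> bool:
    """True if the header or any entry of the .po file carries the fuzzy flag."""
    with open(po_path, 'rb') as f:
        catalog = read_po(f)
    # Lines beginning with "#, fuzzy" in the file surface here as message.fuzzy;
    # the header entry (empty id) is part of the iteration as well.
    return any(message.fuzzy for message in catalog)

if has_fuzzy_translations('openlibrary/i18n/fr/messages.po'):
    print('fr: fuzzy entries present, skipping compilation')
```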
<code>
[start of openlibrary/i18n/__init__.py]
1 from __future__ import print_function
2
3 import sys
4 from typing import List
5
6 import web
7 import os
8 import shutil
9
10 import babel
11 from babel._compat import BytesIO
12 from babel.support import Translations
13 from babel.messages import Catalog
14 from babel.messages.pofile import read_po, write_po
15 from babel.messages.mofile import write_mo
16 from babel.messages.extract import extract_from_file, extract_from_dir, extract_python
17
18 root = os.path.dirname(__file__)
19
20 def _compile_translation(po, mo):
21 try:
22 catalog = read_po(open(po, 'rb'))
23
24 f = open(mo, 'wb')
25 write_mo(f, catalog)
26 f.close()
27 print('compiled', po, file=web.debug)
28 except Exception as e:
29 print('failed to compile', po, file=web.debug)
30 raise e
31
32
33 def get_locales():
34 return [
35 d
36 for d in os.listdir(root)
37 if (os.path.isdir(os.path.join(root, d)) and
38 os.path.exists(os.path.join(root, d, 'messages.po')))
39 ]
40
41 def extract_templetor(fileobj, keywords, comment_tags, options):
42 """Extract i18n messages from web.py templates."""
43 try:
44 instring = fileobj.read().decode('utf-8')
45 # Replace/remove inline js '\$' which interferes with the Babel python parser:
46 cleaned_string = instring.replace('\$', '')
47 code = web.template.Template.generate_code(cleaned_string, fileobj.name)
48 f = BytesIO(code.encode('utf-8')) # Babel wants bytes, not strings
49 except Exception as e:
50 print('Failed to extract ' + fileobj.name + ':', repr(e), file=web.debug)
51 return []
52 return extract_python(f, keywords, comment_tags, options)
53
54
55 def extract_messages(dirs: List[str]):
56 catalog = Catalog(
57 project='Open Library',
58 copyright_holder='Internet Archive'
59 )
60 METHODS = [
61 ("**.py", "python"),
62 ("**.html", "openlibrary.i18n:extract_templetor")
63 ]
64 COMMENT_TAGS = ["NOTE:"]
65
66 for d in dirs:
67 extracted = extract_from_dir(d, METHODS, comment_tags=COMMENT_TAGS,
68 strip_comment_tags=True)
69
70 counts = {}
71 for filename, lineno, message, comments, context in extracted:
72 counts[filename] = counts.get(filename, 0) + 1
73 catalog.add(message, None, [(filename, lineno)], auto_comments=comments)
74
75 for filename, count in counts.items():
76 path = filename if d == filename else os.path.join(d, filename)
77 print(f"{count}\t{path}", file=sys.stderr)
78
79 path = os.path.join(root, 'messages.pot')
80 f = open(path, 'wb')
81 write_po(f, catalog)
82 f.close()
83
84 print('wrote template to', path)
85
86 def compile_translations():
87 for locale in get_locales():
88 po_path = os.path.join(root, locale, 'messages.po')
89 mo_path = os.path.join(root, locale, 'messages.mo')
90
91 if os.path.exists(po_path):
92 _compile_translation(po_path, mo_path)
93
94 def update_translations():
95 pot_path = os.path.join(root, 'messages.pot')
96 template = read_po(open(pot_path, 'rb'))
97
98 for locale in get_locales():
99 po_path = os.path.join(root, locale, 'messages.po')
100 mo_path = os.path.join(root, locale, 'messages.mo')
101
102 if os.path.exists(po_path):
103 catalog = read_po(open(po_path, 'rb'))
104 catalog.update(template)
105
106 f = open(po_path, 'wb')
107 write_po(f, catalog)
108 f.close()
109 print('updated', po_path)
110
111 compile_translations()
112
113
114 def generate_po(args):
115 if args:
116 po_dir = os.path.join(root, args[0])
117 pot_src = os.path.join(root, 'messages.pot')
118 po_dest = os.path.join(po_dir, 'messages.po')
119
120 if os.path.exists(po_dir):
121 if os.path.exists(po_dest):
122 print(f"Portable object file already exists at {po_dest}")
123 else:
124 shutil.copy(pot_src, po_dest)
125 os.chmod(po_dest, 0o666)
126 print(f"File created at {po_dest}")
127 else:
128 os.mkdir(po_dir)
129 os.chmod(po_dir, 0o777)
130 shutil.copy(pot_src, po_dest)
131 os.chmod(po_dest, 0o666)
132 print(f"File created at {po_dest}")
133 else:
134 print("Add failed. Missing required locale code.")
135
136
137 @web.memoize
138 def load_translations(lang):
139 po = os.path.join(root, lang, 'messages.po')
140 mo_path = os.path.join(root, lang, 'messages.mo')
141
142 if os.path.exists(mo_path):
143 return Translations(open(mo_path, 'rb'))
144
145 @web.memoize
146 def load_locale(lang):
147 try:
148 return babel.Locale(lang)
149 except babel.UnknownLocaleError:
150 pass
151
152 class GetText:
153 def __call__(self, string, *args, **kwargs):
154 """Translate a given string to the language of the current locale."""
155 # Get the website locale from the global ctx.lang variable, set in i18n_loadhook
156 translations = load_translations(web.ctx.lang)
157 value = (translations and translations.ugettext(string)) or string
158
159 if args:
160 value = value % args
161 elif kwargs:
162 value = value % kwargs
163
164 return value
165
166 def __getattr__(self, key):
167 from infogami.utils.i18n import strings
168 # for backward-compatability
169 return strings.get('', key)
170
171 class LazyGetText:
172 def __call__(self, string, *args, **kwargs):
173 """Translate a given string lazily."""
174 return LazyObject(lambda: GetText()(string, *args, **kwargs))
175
176 class LazyObject:
177 def __init__(self, creator):
178 self._creator = creator
179
180 def __str__(self):
181 return web.safestr(self._creator())
182
183 def __repr__(self):
184 return repr(self._creator())
185
186 def __add__(self, other):
187 return self._creator() + other
188
189 def __radd__(self, other):
190 return other + self._creator()
191
192
193 def ungettext(s1, s2, _n, *a, **kw):
194 # Get the website locale from the global ctx.lang variable, set in i18n_loadhook
195 translations = load_translations(web.ctx.lang)
196 value = translations and translations.ungettext(s1, s2, _n)
197 if not value:
198 # fallback when translation is not provided
199 if _n == 1:
200 value = s1
201 else:
202 value = s2
203
204 if a:
205 return value % a
206 elif kw:
207 return value % kw
208 else:
209 return value
210
211 def gettext_territory(code):
212 """Returns the territory name in the current locale."""
213 # Get the website locale from the global ctx.lang variable, set in i18n_loadhook
214 locale = load_locale(web.ctx.lang)
215 return locale.territories.get(code, code)
216
217 gettext = GetText()
218 ugettext = gettext
219 lgettext = LazyGetText()
220 _ = gettext
221
[end of openlibrary/i18n/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/openlibrary/i18n/__init__.py b/openlibrary/i18n/__init__.py
--- a/openlibrary/i18n/__init__.py
+++ b/openlibrary/i18n/__init__.py
@@ -30,6 +30,50 @@
raise e
+def _validate_catalog(catalog, locale):
+ validation_errors = []
+ for message in catalog:
+ if message.fuzzy:
+ if message.lineno:
+ validation_errors.append(
+ f'openlibrary/i18n/{locale}/messages.po:{message.lineno}:'
+ f' "{message.string}" is fuzzy.'
+ )
+ else:
+ validation_errors.append(
+ ' File is fuzzy. Remove line containing "#, fuzzy" found near '
+ 'the beginning of the file.'
+ )
+
+ if validation_errors:
+ print("Validation failed...")
+ print("Please correct the following errors before proceeding:")
+ for e in validation_errors:
+ print(e)
+
+ return len(validation_errors) == 0
+
+
+def validate_translations(args):
+ if args:
+ locale = args[0]
+ po_path = os.path.join(root, locale, 'messages.po')
+
+ if os.path.exists(po_path):
+ catalog = read_po(open(po_path, 'rb'))
+ is_valid = _validate_catalog(catalog, locale)
+
+ if is_valid:
+ print(f'Translations for locale "{locale}" are valid!')
+ return is_valid
+ else:
+ print(f'Portable object file for locale "{locale}" does not exist.')
+ return False
+ else:
+ print('Must include locale code when executing validate.')
+ return False
+
+
def get_locales():
return [
d
| {"golden_diff": "diff --git a/openlibrary/i18n/__init__.py b/openlibrary/i18n/__init__.py\n--- a/openlibrary/i18n/__init__.py\n+++ b/openlibrary/i18n/__init__.py\n@@ -30,6 +30,50 @@\n raise e\n \n \n+def _validate_catalog(catalog, locale):\n+ validation_errors = []\n+ for message in catalog:\n+ if message.fuzzy:\n+ if message.lineno:\n+ validation_errors.append(\n+ f'openlibrary/i18n/{locale}/messages.po:{message.lineno}:'\n+ f' \"{message.string}\" is fuzzy.'\n+ )\n+ else:\n+ validation_errors.append(\n+ ' File is fuzzy. Remove line containing \"#, fuzzy\" found near '\n+ 'the beginning of the file.'\n+ )\n+\n+ if validation_errors:\n+ print(\"Validation failed...\")\n+ print(\"Please correct the following errors before proceeding:\")\n+ for e in validation_errors:\n+ print(e)\n+\n+ return len(validation_errors) == 0\n+\n+\n+def validate_translations(args):\n+ if args:\n+ locale = args[0]\n+ po_path = os.path.join(root, locale, 'messages.po')\n+\n+ if os.path.exists(po_path):\n+ catalog = read_po(open(po_path, 'rb'))\n+ is_valid = _validate_catalog(catalog, locale)\n+\n+ if is_valid:\n+ print(f'Translations for locale \"{locale}\" are valid!')\n+ return is_valid\n+ else:\n+ print(f'Portable object file for locale \"{locale}\" does not exist.')\n+ return False\n+ else:\n+ print('Must include locale code when executing validate.')\n+ return False\n+\n+\n def get_locales():\n return [\n d\n", "issue": "Create `i18n-messages validate` script to check for presence of fuzzy flags\n<!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->\r\n\r\n### Describe the problem that you'd like solved\r\n<!-- A clear and concise description of what you want to happen. -->\r\nWhen `i18n-messages update` is executed, some translations in `.po` files will have a `fuzzy` annotation. This means that the translation may not be completely accurate, and should be manually approved by a translator. The last step of the `i18n-messages update` flow compiles all `.po` files into `.mo` files, regardless of the presence of fuzzy translations.\r\n\r\n### Proposal & Constraints\r\n<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->\r\nAdd validation step to i18n flow, in between the update and compile steps. Validation step will read each `.po` file, searching for lines that begin with `#, fuzzy`. If any are found in a `.po` file, that file should not be compiled.\r\n\r\n<!-- Which suggestions or requirements should be considered for how feature needs to appear or be implemented? -->\r\n\r\n### Additional context\r\n<!-- Add any other context or screenshots about the feature request here. 
-->\r\nThis is a sub-task of #5134.\r\n### Stakeholders\r\n<!-- @ tag stakeholders of this bug -->\r\n@cdrini\r\n\r\n\n", "before_files": [{"content": "from __future__ import print_function\n\nimport sys\nfrom typing import List\n\nimport web\nimport os\nimport shutil\n\nimport babel\nfrom babel._compat import BytesIO\nfrom babel.support import Translations\nfrom babel.messages import Catalog\nfrom babel.messages.pofile import read_po, write_po\nfrom babel.messages.mofile import write_mo\nfrom babel.messages.extract import extract_from_file, extract_from_dir, extract_python\n\nroot = os.path.dirname(__file__)\n\ndef _compile_translation(po, mo):\n try:\n catalog = read_po(open(po, 'rb'))\n\n f = open(mo, 'wb')\n write_mo(f, catalog)\n f.close()\n print('compiled', po, file=web.debug)\n except Exception as e:\n print('failed to compile', po, file=web.debug)\n raise e\n\n\ndef get_locales():\n return [\n d\n for d in os.listdir(root)\n if (os.path.isdir(os.path.join(root, d)) and\n os.path.exists(os.path.join(root, d, 'messages.po')))\n ]\n\ndef extract_templetor(fileobj, keywords, comment_tags, options):\n \"\"\"Extract i18n messages from web.py templates.\"\"\"\n try:\n instring = fileobj.read().decode('utf-8')\n # Replace/remove inline js '\\$' which interferes with the Babel python parser:\n cleaned_string = instring.replace('\\$', '')\n code = web.template.Template.generate_code(cleaned_string, fileobj.name)\n f = BytesIO(code.encode('utf-8')) # Babel wants bytes, not strings\n except Exception as e:\n print('Failed to extract ' + fileobj.name + ':', repr(e), file=web.debug)\n return []\n return extract_python(f, keywords, comment_tags, options)\n\n\ndef extract_messages(dirs: List[str]):\n catalog = Catalog(\n project='Open Library',\n copyright_holder='Internet Archive'\n )\n METHODS = [\n (\"**.py\", \"python\"),\n (\"**.html\", \"openlibrary.i18n:extract_templetor\")\n ]\n COMMENT_TAGS = [\"NOTE:\"]\n\n for d in dirs:\n extracted = extract_from_dir(d, METHODS, comment_tags=COMMENT_TAGS,\n strip_comment_tags=True)\n\n counts = {}\n for filename, lineno, message, comments, context in extracted:\n counts[filename] = counts.get(filename, 0) + 1\n catalog.add(message, None, [(filename, lineno)], auto_comments=comments)\n\n for filename, count in counts.items():\n path = filename if d == filename else os.path.join(d, filename)\n print(f\"{count}\\t{path}\", file=sys.stderr)\n\n path = os.path.join(root, 'messages.pot')\n f = open(path, 'wb')\n write_po(f, catalog)\n f.close()\n\n print('wrote template to', path)\n\ndef compile_translations():\n for locale in get_locales():\n po_path = os.path.join(root, locale, 'messages.po')\n mo_path = os.path.join(root, locale, 'messages.mo')\n\n if os.path.exists(po_path):\n _compile_translation(po_path, mo_path)\n\ndef update_translations():\n pot_path = os.path.join(root, 'messages.pot')\n template = read_po(open(pot_path, 'rb'))\n\n for locale in get_locales():\n po_path = os.path.join(root, locale, 'messages.po')\n mo_path = os.path.join(root, locale, 'messages.mo')\n\n if os.path.exists(po_path):\n catalog = read_po(open(po_path, 'rb'))\n catalog.update(template)\n\n f = open(po_path, 'wb')\n write_po(f, catalog)\n f.close()\n print('updated', po_path)\n\n compile_translations()\n\n\ndef generate_po(args):\n if args:\n po_dir = os.path.join(root, args[0])\n pot_src = os.path.join(root, 'messages.pot')\n po_dest = os.path.join(po_dir, 'messages.po')\n\n if os.path.exists(po_dir):\n if os.path.exists(po_dest):\n print(f\"Portable object file 
already exists at {po_dest}\")\n else:\n shutil.copy(pot_src, po_dest)\n os.chmod(po_dest, 0o666)\n print(f\"File created at {po_dest}\")\n else:\n os.mkdir(po_dir)\n os.chmod(po_dir, 0o777)\n shutil.copy(pot_src, po_dest)\n os.chmod(po_dest, 0o666)\n print(f\"File created at {po_dest}\")\n else:\n print(\"Add failed. Missing required locale code.\")\n\n\[email protected]\ndef load_translations(lang):\n po = os.path.join(root, lang, 'messages.po')\n mo_path = os.path.join(root, lang, 'messages.mo')\n\n if os.path.exists(mo_path):\n return Translations(open(mo_path, 'rb'))\n\[email protected]\ndef load_locale(lang):\n try:\n return babel.Locale(lang)\n except babel.UnknownLocaleError:\n pass\n\nclass GetText:\n def __call__(self, string, *args, **kwargs):\n \"\"\"Translate a given string to the language of the current locale.\"\"\"\n # Get the website locale from the global ctx.lang variable, set in i18n_loadhook\n translations = load_translations(web.ctx.lang)\n value = (translations and translations.ugettext(string)) or string\n\n if args:\n value = value % args\n elif kwargs:\n value = value % kwargs\n\n return value\n\n def __getattr__(self, key):\n from infogami.utils.i18n import strings\n # for backward-compatability\n return strings.get('', key)\n\nclass LazyGetText:\n def __call__(self, string, *args, **kwargs):\n \"\"\"Translate a given string lazily.\"\"\"\n return LazyObject(lambda: GetText()(string, *args, **kwargs))\n\nclass LazyObject:\n def __init__(self, creator):\n self._creator = creator\n\n def __str__(self):\n return web.safestr(self._creator())\n\n def __repr__(self):\n return repr(self._creator())\n\n def __add__(self, other):\n return self._creator() + other\n\n def __radd__(self, other):\n return other + self._creator()\n\n\ndef ungettext(s1, s2, _n, *a, **kw):\n # Get the website locale from the global ctx.lang variable, set in i18n_loadhook\n translations = load_translations(web.ctx.lang)\n value = translations and translations.ungettext(s1, s2, _n)\n if not value:\n # fallback when translation is not provided\n if _n == 1:\n value = s1\n else:\n value = s2\n\n if a:\n return value % a\n elif kw:\n return value % kw\n else:\n return value\n\ndef gettext_territory(code):\n \"\"\"Returns the territory name in the current locale.\"\"\"\n # Get the website locale from the global ctx.lang variable, set in i18n_loadhook\n locale = load_locale(web.ctx.lang)\n return locale.territories.get(code, code)\n\ngettext = GetText()\nugettext = gettext\nlgettext = LazyGetText()\n_ = gettext\n", "path": "openlibrary/i18n/__init__.py"}]} | 3,012 | 402 |
gh_patches_debug_1113 | rasdani/github-patches | git_diff | Pylons__pyramid-2225 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update to Sphinx 1.3.4 when released
There is a [bug in Sphinx 1.3.3 and 1.3.1](https://github.com/sphinx-doc/sphinx/issues/2189) (I haven't tried 1.3.2) where next and previous links in Sphinx documentation are broken when going into children and across sibling directories.
When 1.3.4 is released, we need to pin sphinx to 1.3.4, which will include the commit made 8 days after the 1.3.3 release.
</issue>
<code>
[start of setup.py]
1 ##############################################################################
2 #
3 # Copyright (c) 2008-2013 Agendaless Consulting and Contributors.
4 # All Rights Reserved.
5 #
6 # This software is subject to the provisions of the BSD-like license at
7 # http://www.repoze.org/LICENSE.txt. A copy of the license should accompany
8 # this distribution. THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL
9 # EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO,
10 # THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND
11 # FITNESS FOR A PARTICULAR PURPOSE
12 #
13 ##############################################################################
14
15 import os
16 import sys
17
18 from setuptools import setup, find_packages
19
20 py_version = sys.version_info[:2]
21
22 PY3 = py_version[0] == 3
23
24 if PY3:
25 if py_version < (3, 2):
26 raise RuntimeError('On Python 3, Pyramid requires Python 3.2 or better')
27 else:
28 if py_version < (2, 6):
29 raise RuntimeError('On Python 2, Pyramid requires Python 2.6 or better')
30
31 here = os.path.abspath(os.path.dirname(__file__))
32 try:
33 with open(os.path.join(here, 'README.rst')) as f:
34 README = f.read()
35 with open(os.path.join(here, 'CHANGES.txt')) as f:
36 CHANGES = f.read()
37 except IOError:
38 README = CHANGES = ''
39
40 install_requires=[
41 'setuptools',
42 'WebOb >= 1.3.1', # request.domain and CookieProfile
43 'repoze.lru >= 0.4', # py3 compat
44 'zope.interface >= 3.8.0', # has zope.interface.registry
45 'zope.deprecation >= 3.5.0', # py3 compat
46 'venusian >= 1.0a3', # ``ignore``
47 'translationstring >= 0.4', # py3 compat
48 'PasteDeploy >= 1.5.0', # py3 compat
49 ]
50
51 tests_require = [
52 'WebTest >= 1.3.1', # py3 compat
53 ]
54
55 if not PY3:
56 tests_require.append('zope.component>=3.11.0')
57
58 docs_extras = [
59 'Sphinx >= 1.3.1',
60 'docutils',
61 'repoze.sphinx.autointerface',
62 'pylons_sphinx_latesturl',
63 'pylons-sphinx-themes',
64 'sphinxcontrib-programoutput',
65 ]
66
67 testing_extras = tests_require + [
68 'nose',
69 'coverage',
70 'virtualenv', # for scaffolding tests
71 ]
72
73 setup(name='pyramid',
74 version='1.6',
75 description='The Pyramid Web Framework, a Pylons project',
76 long_description=README + '\n\n' + CHANGES,
77 classifiers=[
78 "Development Status :: 6 - Mature",
79 "Intended Audience :: Developers",
80 "Programming Language :: Python",
81 "Programming Language :: Python :: 2.6",
82 "Programming Language :: Python :: 2.7",
83 "Programming Language :: Python :: 3",
84 "Programming Language :: Python :: 3.2",
85 "Programming Language :: Python :: 3.3",
86 "Programming Language :: Python :: 3.4",
87 "Programming Language :: Python :: 3.5",
88 "Programming Language :: Python :: Implementation :: CPython",
89 "Programming Language :: Python :: Implementation :: PyPy",
90 "Framework :: Pyramid",
91 "Topic :: Internet :: WWW/HTTP",
92 "Topic :: Internet :: WWW/HTTP :: WSGI",
93 "License :: Repoze Public License",
94 ],
95 keywords='web wsgi pylons pyramid',
96 author="Chris McDonough, Agendaless Consulting",
97 author_email="[email protected]",
98 url="http://docs.pylonsproject.org/en/latest/docs/pyramid.html",
99 license="BSD-derived (http://www.repoze.org/LICENSE.txt)",
100 packages=find_packages(),
101 include_package_data=True,
102 zip_safe=False,
103 install_requires = install_requires,
104 extras_require = {
105 'testing':testing_extras,
106 'docs':docs_extras,
107 },
108 tests_require = tests_require,
109 test_suite="pyramid.tests",
110 entry_points = """\
111 [pyramid.scaffold]
112 starter=pyramid.scaffolds:StarterProjectTemplate
113 zodb=pyramid.scaffolds:ZODBProjectTemplate
114 alchemy=pyramid.scaffolds:AlchemyProjectTemplate
115 [pyramid.pshell_runner]
116 python=pyramid.scripts.pshell:python_shell_runner
117 [console_scripts]
118 pcreate = pyramid.scripts.pcreate:main
119 pserve = pyramid.scripts.pserve:main
120 pshell = pyramid.scripts.pshell:main
121 proutes = pyramid.scripts.proutes:main
122 pviews = pyramid.scripts.pviews:main
123 ptweens = pyramid.scripts.ptweens:main
124 prequest = pyramid.scripts.prequest:main
125 pdistreport = pyramid.scripts.pdistreport:main
126 [paste.server_runner]
127 wsgiref = pyramid.scripts.pserve:wsgiref_server_runner
128 cherrypy = pyramid.scripts.pserve:cherrypy_server_runner
129 """
130 )
131
132
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -56,7 +56,7 @@
tests_require.append('zope.component>=3.11.0')
docs_extras = [
- 'Sphinx >= 1.3.1',
+ 'Sphinx >= 1.3.4',
'docutils',
'repoze.sphinx.autointerface',
'pylons_sphinx_latesturl',
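
The tightened pin only affects the `docs` extra; a small illustrative check of which Sphinx releases the new specifier accepts (uses the `packaging` library, not part of this setup.py):

```python
from packaging.specifiers import SpecifierSet

docs_pin = SpecifierSet(">=1.3.4")
for candidate in ("1.3.1", "1.3.3", "1.3.4"):
    # Only 1.3.4, which contains the prev/next-link fix, satisfies the pin
    print(candidate, candidate in docs_pin)
```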
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -56,7 +56,7 @@\n tests_require.append('zope.component>=3.11.0')\n \n docs_extras = [\n- 'Sphinx >= 1.3.1',\n+ 'Sphinx >= 1.3.4',\n 'docutils',\n 'repoze.sphinx.autointerface',\n 'pylons_sphinx_latesturl',\n", "issue": "Update to Sphinx 1.3.4 when released\nThere is a [bug in Sphinx 1.3.3 and 1.3.1](https://github.com/sphinx-doc/sphinx/issues/2189) (I haven't tried 1.3.2) where next and previous links in Sphinx documentation are broken when going into children and across sibling directories.\n\nWhen 1.3.4 is released, we need to pin sphinx to 1.3.4, which will include the commit made 8 days after the 1.3.3 release.\n\n", "before_files": [{"content": "##############################################################################\n#\n# Copyright (c) 2008-2013 Agendaless Consulting and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the BSD-like license at\n# http://www.repoze.org/LICENSE.txt. A copy of the license should accompany\n# this distribution. THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL\n# EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND\n# FITNESS FOR A PARTICULAR PURPOSE\n#\n##############################################################################\n\nimport os\nimport sys\n\nfrom setuptools import setup, find_packages\n\npy_version = sys.version_info[:2]\n\nPY3 = py_version[0] == 3\n\nif PY3:\n if py_version < (3, 2):\n raise RuntimeError('On Python 3, Pyramid requires Python 3.2 or better')\nelse:\n if py_version < (2, 6):\n raise RuntimeError('On Python 2, Pyramid requires Python 2.6 or better')\n\nhere = os.path.abspath(os.path.dirname(__file__))\ntry:\n with open(os.path.join(here, 'README.rst')) as f:\n README = f.read()\n with open(os.path.join(here, 'CHANGES.txt')) as f:\n CHANGES = f.read()\nexcept IOError:\n README = CHANGES = ''\n\ninstall_requires=[\n 'setuptools',\n 'WebOb >= 1.3.1', # request.domain and CookieProfile\n 'repoze.lru >= 0.4', # py3 compat\n 'zope.interface >= 3.8.0', # has zope.interface.registry\n 'zope.deprecation >= 3.5.0', # py3 compat\n 'venusian >= 1.0a3', # ``ignore``\n 'translationstring >= 0.4', # py3 compat\n 'PasteDeploy >= 1.5.0', # py3 compat\n ]\n\ntests_require = [\n 'WebTest >= 1.3.1', # py3 compat\n ]\n\nif not PY3:\n tests_require.append('zope.component>=3.11.0')\n\ndocs_extras = [\n 'Sphinx >= 1.3.1',\n 'docutils',\n 'repoze.sphinx.autointerface',\n 'pylons_sphinx_latesturl',\n 'pylons-sphinx-themes',\n 'sphinxcontrib-programoutput',\n ]\n\ntesting_extras = tests_require + [\n 'nose',\n 'coverage',\n 'virtualenv', # for scaffolding tests\n ]\n\nsetup(name='pyramid',\n version='1.6',\n description='The Pyramid Web Framework, a Pylons project',\n long_description=README + '\\n\\n' + CHANGES,\n classifiers=[\n \"Development Status :: 6 - Mature\",\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.2\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Framework :: Pyramid\",\n 
\"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI\",\n \"License :: Repoze Public License\",\n ],\n keywords='web wsgi pylons pyramid',\n author=\"Chris McDonough, Agendaless Consulting\",\n author_email=\"[email protected]\",\n url=\"http://docs.pylonsproject.org/en/latest/docs/pyramid.html\",\n license=\"BSD-derived (http://www.repoze.org/LICENSE.txt)\",\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n install_requires = install_requires,\n extras_require = {\n 'testing':testing_extras,\n 'docs':docs_extras,\n },\n tests_require = tests_require,\n test_suite=\"pyramid.tests\",\n entry_points = \"\"\"\\\n [pyramid.scaffold]\n starter=pyramid.scaffolds:StarterProjectTemplate\n zodb=pyramid.scaffolds:ZODBProjectTemplate\n alchemy=pyramid.scaffolds:AlchemyProjectTemplate\n [pyramid.pshell_runner]\n python=pyramid.scripts.pshell:python_shell_runner\n [console_scripts]\n pcreate = pyramid.scripts.pcreate:main\n pserve = pyramid.scripts.pserve:main\n pshell = pyramid.scripts.pshell:main\n proutes = pyramid.scripts.proutes:main\n pviews = pyramid.scripts.pviews:main\n ptweens = pyramid.scripts.ptweens:main\n prequest = pyramid.scripts.prequest:main\n pdistreport = pyramid.scripts.pdistreport:main\n [paste.server_runner]\n wsgiref = pyramid.scripts.pserve:wsgiref_server_runner\n cherrypy = pyramid.scripts.pserve:cherrypy_server_runner\n \"\"\"\n )\n\n", "path": "setup.py"}]} | 2,094 | 106 |
gh_patches_debug_5381 | rasdani/github-patches | git_diff | ManimCommunity__manim-1053 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Broken source links on the stable version of the documentation
## Description of bug / unexpected behavior
Source links on the stable version of the documentation do not work. They link to something like this: https://github.com/ManimCommunity/manim/blob/stable/manim/mobject/changing.py, which is a 404 error.
## Expected behavior
Source links should link to a file containing source code for the stable version.
## How to reproduce the issue
On the documentation website, switch the version to stable. Navigate to and click the source link of any class.
## Additional comments
Perhaps this is an access rights issue, which explains why it evaded detection from community devs for so long?
</issue>
<code>
[start of docs/source/conf.py]
1 # Configuration file for the Sphinx documentation builder.
2 #
3 # This file only contains a selection of the most common options. For a full
4 # list see the documentation:
5 # https://www.sphinx-doc.org/en/master/usage/configuration.html
6
7 # -- Path setup --------------------------------------------------------------
8
9 # If extensions (or modules to document with autodoc) are in another directory,
10 # add these directories to sys.path here. If the directory is relative to the
11 # documentation root, use os.path.abspath to make it absolute, like shown here.
12
13 import os
14 import sys
15 from distutils.sysconfig import get_python_lib
16 from pathlib import Path
17
18 sys.path.insert(0, os.path.abspath("."))
19
20
21 if os.environ.get("READTHEDOCS") == "True":
22 site_path = get_python_lib()
23 # we need to add ffmpeg to the path
24 ffmpeg_path = os.path.join(site_path, "imageio_ffmpeg", "binaries")
25 # the included binary is named ffmpeg-linux..., create a symlink
26 [ffmpeg_bin] = [
27 file for file in os.listdir(ffmpeg_path) if file.startswith("ffmpeg-")
28 ]
29 os.symlink(
30 os.path.join(ffmpeg_path, ffmpeg_bin), os.path.join(ffmpeg_path, "ffmpeg")
31 )
32 os.environ["PATH"] += os.pathsep + ffmpeg_path
33
34
35 # -- Project information -----------------------------------------------------
36
37 project = "Manim"
38 copyright = "2020, The Manim Community Dev Team"
39 author = "The Manim Community Dev Team"
40
41
42 # -- General configuration ---------------------------------------------------
43
44 # Add any Sphinx extension module names here, as strings. They can be
45 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
46 # ones.
47 extensions = [
48 "sphinx.ext.autodoc",
49 "recommonmark",
50 "sphinx_copybutton",
51 "sphinx.ext.napoleon",
52 "sphinx.ext.autosummary",
53 "sphinx.ext.doctest",
54 "sphinx.ext.extlinks",
55 "sphinx.ext.linkcode",
56 "sphinxext.opengraph",
57 "manim_directive",
58 ]
59
60 # Automatically generate stub pages when using the .. autosummary directive
61 autosummary_generate = True
62
63 # generate documentation from type hints
64 autodoc_typehints = "description"
65 autoclass_content = "both"
66
67 # controls whether functions documented by the autofunction directive
68 # appear with their full module names
69 add_module_names = False
70
71 # Add any paths that contain templates here, relative to this directory.
72 templates_path = ["_templates"]
73
74 # Custom section headings in our documentation
75 napoleon_custom_sections = ["Tests", ("Test", "Tests")]
76
77 # List of patterns, relative to source directory, that match files and
78 # directories to ignore when looking for source files.
79 # This pattern also affects html_static_path and html_extra_path.
80 exclude_patterns = []
81
82
83 # -- Options for HTML output -------------------------------------------------
84
85 # The theme to use for HTML and HTML Help pages. See the documentation for
86 # a list of builtin themes.
87 #
88 import guzzle_sphinx_theme
89
90 html_theme_path = guzzle_sphinx_theme.html_theme_path()
91 html_theme = "guzzle_sphinx_theme"
92 html_favicon = str(Path("_static/favicon.ico"))
93
94 # There's a standing issue with Sphinx's new-style sidebars. This is a
95 # workaround. Taken from
96 # https://github.com/guzzle/guzzle_sphinx_theme/issues/33#issuecomment-637081826
97 html_sidebars = {"**": ["logo-text.html", "globaltoc.html", "searchbox.html"]}
98
99 # Register the theme as an extension to generate a sitemap.xml
100 extensions.append("guzzle_sphinx_theme")
101
102 # Add any paths that contain custom static files (such as style sheets) here,
103 # relative to this directory. They are copied after the builtin static files,
104 # so a file named "default.css" will overwrite the builtin "default.css".
105 html_static_path = ["_static"]
106
107 # This specifies any additional css files that will override the theme's
108 html_css_files = ["custom.css"]
109
110 # source links to github
111 def linkcode_resolve(domain, info):
112 if domain != "py":
113 return None
114 if not info["module"]:
115 return None
116 filename = info["module"].replace(".", "/")
117 version = os.getenv("READTHEDOCS_VERSION", "master")
118 if version == "latest":
119 version = "master"
120 return f"https://github.com/ManimCommunity/manim/blob/{version}/{filename}.py"
121
122
123 # external links
124 extlinks = {
125 "issue": ("https://github.com/ManimCommunity/manim/issues/%s", "issue "),
126 "pr": ("https://github.com/ManimCommunity/manim/pull/%s", "pull request "),
127 }
128
129 # opengraph settings
130 ogp_image = "https://www.manim.community/logo.png"
131 ogp_site_name = "Manim Community | Documentation"
132 ogp_site_url = "https://docs.manim.community/"
133
[end of docs/source/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/source/conf.py b/docs/source/conf.py
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -115,7 +115,7 @@
return None
filename = info["module"].replace(".", "/")
version = os.getenv("READTHEDOCS_VERSION", "master")
- if version == "latest":
+ if version == "latest" or version == "stable":
version = "master"
return f"https://github.com/ManimCommunity/manim/blob/{version}/{filename}.py"
| {"golden_diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -115,7 +115,7 @@\n return None\n filename = info[\"module\"].replace(\".\", \"/\")\n version = os.getenv(\"READTHEDOCS_VERSION\", \"master\")\n- if version == \"latest\":\n+ if version == \"latest\" or version == \"stable\":\n version = \"master\"\n return f\"https://github.com/ManimCommunity/manim/blob/{version}/{filename}.py\"\n", "issue": "Broken source links on the stable version of the documentation\n## Description of bug / unexpected behavior\nSource links on the stable version of documentation does not work. It links to something like this: https://github.com/ManimCommunity/manim/blob/stable/manim/mobject/changing.py which is a 404 error. \n\n## Expected behavior\nSource links should link to a file containing source code for the stable version. \n\n## How to reproduce the issue\nOn the documentation website, switch the version to stable. Navigate to and click the source link of any class. \n\n## Additional comments\nPerhaps this is an access rights issue, which explains why it evaded detection from community devs for so long?\n\n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\nimport os\nimport sys\nfrom distutils.sysconfig import get_python_lib\nfrom pathlib import Path\n\nsys.path.insert(0, os.path.abspath(\".\"))\n\n\nif os.environ.get(\"READTHEDOCS\") == \"True\":\n site_path = get_python_lib()\n # we need to add ffmpeg to the path\n ffmpeg_path = os.path.join(site_path, \"imageio_ffmpeg\", \"binaries\")\n # the included binary is named ffmpeg-linux..., create a symlink\n [ffmpeg_bin] = [\n file for file in os.listdir(ffmpeg_path) if file.startswith(\"ffmpeg-\")\n ]\n os.symlink(\n os.path.join(ffmpeg_path, ffmpeg_bin), os.path.join(ffmpeg_path, \"ffmpeg\")\n )\n os.environ[\"PATH\"] += os.pathsep + ffmpeg_path\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"Manim\"\ncopyright = \"2020, The Manim Community Dev Team\"\nauthor = \"The Manim Community Dev Team\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"recommonmark\",\n \"sphinx_copybutton\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.extlinks\",\n \"sphinx.ext.linkcode\",\n \"sphinxext.opengraph\",\n \"manim_directive\",\n]\n\n# Automatically generate stub pages when using the .. 
autosummary directive\nautosummary_generate = True\n\n# generate documentation from type hints\nautodoc_typehints = \"description\"\nautoclass_content = \"both\"\n\n# controls whether functions documented by the autofunction directive\n# appear with their full module names\nadd_module_names = False\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# Custom section headings in our documentation\nnapoleon_custom_sections = [\"Tests\", (\"Test\", \"Tests\")]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = []\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nimport guzzle_sphinx_theme\n\nhtml_theme_path = guzzle_sphinx_theme.html_theme_path()\nhtml_theme = \"guzzle_sphinx_theme\"\nhtml_favicon = str(Path(\"_static/favicon.ico\"))\n\n# There's a standing issue with Sphinx's new-style sidebars. This is a\n# workaround. Taken from\n# https://github.com/guzzle/guzzle_sphinx_theme/issues/33#issuecomment-637081826\nhtml_sidebars = {\"**\": [\"logo-text.html\", \"globaltoc.html\", \"searchbox.html\"]}\n\n# Register the theme as an extension to generate a sitemap.xml\nextensions.append(\"guzzle_sphinx_theme\")\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# This specifies any additional css files that will override the theme's\nhtml_css_files = [\"custom.css\"]\n\n# source links to github\ndef linkcode_resolve(domain, info):\n if domain != \"py\":\n return None\n if not info[\"module\"]:\n return None\n filename = info[\"module\"].replace(\".\", \"/\")\n version = os.getenv(\"READTHEDOCS_VERSION\", \"master\")\n if version == \"latest\":\n version = \"master\"\n return f\"https://github.com/ManimCommunity/manim/blob/{version}/{filename}.py\"\n\n\n# external links\nextlinks = {\n \"issue\": (\"https://github.com/ManimCommunity/manim/issues/%s\", \"issue \"),\n \"pr\": (\"https://github.com/ManimCommunity/manim/pull/%s\", \"pull request \"),\n}\n\n# opengraph settings\nogp_image = \"https://www.manim.community/logo.png\"\nogp_site_name = \"Manim Community | Documentation\"\nogp_site_url = \"https://docs.manim.community/\"\n", "path": "docs/source/conf.py"}]} | 2,042 | 122 |
gh_patches_debug_11888 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-3162 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Additional context for `Mixed types x and y in attribute value sequence` warnings
**Is your feature request related to a problem?**
I'm getting a whole lot of warnings like
```
opentelemetry.attributes.Mixed types str and int in attribute value sequence
```
But they're missing context in order to resolve them. I'd like to know what attribute it is that has mixed types.
**Describe the solution you'd like**
Include `key` value in the message to add some more context and give a better chance to resolve the warning.
**Describe alternatives you've considered**
None
**Additional context**
Log is emitted from the lines below
https://github.com/open-telemetry/opentelemetry-python/blob/e0e6a3a940c16c1df6493e258ccfbc57ac38cf96/opentelemetry-api/src/opentelemetry/attributes/__init__.py#L86-L91
</issue>
<code>
[start of opentelemetry-api/src/opentelemetry/attributes/__init__.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # type: ignore
15
16 import logging
17 import threading
18 from collections import OrderedDict
19 from collections.abc import MutableMapping
20 from typing import Optional, Sequence, Union
21
22 from opentelemetry.util import types
23
24 # bytes are accepted as a user supplied value for attributes but
25 # decoded to strings internally.
26 _VALID_ATTR_VALUE_TYPES = (bool, str, bytes, int, float)
27
28
29 _logger = logging.getLogger(__name__)
30
31
32 def _clean_attribute(
33 key: str, value: types.AttributeValue, max_len: Optional[int]
34 ) -> Optional[types.AttributeValue]:
35 """Checks if attribute value is valid and cleans it if required.
36
37 The function returns the cleaned value or None if the value is not valid.
38
39 An attribute value is valid if it is either:
40 - A primitive type: string, boolean, double precision floating
41 point (IEEE 754-1985) or integer.
42 - An array of primitive type values. The array MUST be homogeneous,
43 i.e. it MUST NOT contain values of different types.
44
45 An attribute needs cleansing if:
46 - Its length is greater than the maximum allowed length.
47 - It needs to be encoded/decoded e.g, bytes to strings.
48 """
49
50 if not (key and isinstance(key, str)):
51 _logger.warning("invalid key `%s`. must be non-empty string.", key)
52 return None
53
54 if isinstance(value, _VALID_ATTR_VALUE_TYPES):
55 return _clean_attribute_value(value, max_len)
56
57 if isinstance(value, Sequence):
58 sequence_first_valid_type = None
59 cleaned_seq = []
60
61 for element in value:
62 element = _clean_attribute_value(element, max_len)
63 if element is None:
64 cleaned_seq.append(element)
65 continue
66
67 element_type = type(element)
68 # Reject attribute value if sequence contains a value with an incompatible type.
69 if element_type not in _VALID_ATTR_VALUE_TYPES:
70 _logger.warning(
71 "Invalid type %s in attribute value sequence. Expected one of "
72 "%s or None",
73 element_type.__name__,
74 [
75 valid_type.__name__
76 for valid_type in _VALID_ATTR_VALUE_TYPES
77 ],
78 )
79 return None
80
81 # The type of the sequence must be homogeneous. The first non-None
82 # element determines the type of the sequence
83 if sequence_first_valid_type is None:
84 sequence_first_valid_type = element_type
85 # use equality instead of isinstance as isinstance(True, int) evaluates to True
86 elif element_type != sequence_first_valid_type:
87 _logger.warning(
88 "Mixed types %s and %s in attribute value sequence",
89 sequence_first_valid_type.__name__,
90 type(element).__name__,
91 )
92 return None
93
94 cleaned_seq.append(element)
95
96 # Freeze mutable sequences defensively
97 return tuple(cleaned_seq)
98
99 _logger.warning(
100 "Invalid type %s for attribute '%s' value. Expected one of %s or a "
101 "sequence of those types",
102 type(value).__name__,
103 key,
104 [valid_type.__name__ for valid_type in _VALID_ATTR_VALUE_TYPES],
105 )
106 return None
107
108
109 def _clean_attribute_value(
110 value: types.AttributeValue, limit: Optional[int]
111 ) -> Union[types.AttributeValue, None]:
112 if value is None:
113 return None
114
115 if isinstance(value, bytes):
116 try:
117 value = value.decode()
118 except UnicodeDecodeError:
119 _logger.warning("Byte attribute could not be decoded.")
120 return None
121
122 if limit is not None and isinstance(value, str):
123 value = value[:limit]
124 return value
125
126
127 class BoundedAttributes(MutableMapping):
128 """An ordered dict with a fixed max capacity.
129
130 Oldest elements are dropped when the dict is full and a new element is
131 added.
132 """
133
134 def __init__(
135 self,
136 maxlen: Optional[int] = None,
137 attributes: types.Attributes = None,
138 immutable: bool = True,
139 max_value_len: Optional[int] = None,
140 ):
141 if maxlen is not None:
142 if not isinstance(maxlen, int) or maxlen < 0:
143 raise ValueError(
144 "maxlen must be valid int greater or equal to 0"
145 )
146 self.maxlen = maxlen
147 self.dropped = 0
148 self.max_value_len = max_value_len
149 self._dict = OrderedDict() # type: OrderedDict
150 self._lock = threading.Lock() # type: threading.Lock
151 if attributes:
152 for key, value in attributes.items():
153 self[key] = value
154 self._immutable = immutable
155
156 def __repr__(self):
157 return (
158 f"{type(self).__name__}({dict(self._dict)}, maxlen={self.maxlen})"
159 )
160
161 def __getitem__(self, key):
162 return self._dict[key]
163
164 def __setitem__(self, key, value):
165 if getattr(self, "_immutable", False):
166 raise TypeError
167 with self._lock:
168 if self.maxlen is not None and self.maxlen == 0:
169 self.dropped += 1
170 return
171
172 value = _clean_attribute(key, value, self.max_value_len)
173 if value is not None:
174 if key in self._dict:
175 del self._dict[key]
176 elif (
177 self.maxlen is not None and len(self._dict) == self.maxlen
178 ):
179 self._dict.popitem(last=False)
180 self.dropped += 1
181
182 self._dict[key] = value
183
184 def __delitem__(self, key):
185 if getattr(self, "_immutable", False):
186 raise TypeError
187 with self._lock:
188 del self._dict[key]
189
190 def __iter__(self):
191 with self._lock:
192 return iter(self._dict.copy())
193
194 def __len__(self):
195 return len(self._dict)
196
197 def copy(self):
198 return self._dict.copy()
199
[end of opentelemetry-api/src/opentelemetry/attributes/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/opentelemetry-api/src/opentelemetry/attributes/__init__.py b/opentelemetry-api/src/opentelemetry/attributes/__init__.py
--- a/opentelemetry-api/src/opentelemetry/attributes/__init__.py
+++ b/opentelemetry-api/src/opentelemetry/attributes/__init__.py
@@ -85,7 +85,8 @@
# use equality instead of isinstance as isinstance(True, int) evaluates to True
elif element_type != sequence_first_valid_type:
_logger.warning(
- "Mixed types %s and %s in attribute value sequence",
+ "Attribute %r mixes types %s and %s in attribute value sequence",
+ key,
sequence_first_valid_type.__name__,
type(element).__name__,
)
| {"golden_diff": "diff --git a/opentelemetry-api/src/opentelemetry/attributes/__init__.py b/opentelemetry-api/src/opentelemetry/attributes/__init__.py\n--- a/opentelemetry-api/src/opentelemetry/attributes/__init__.py\n+++ b/opentelemetry-api/src/opentelemetry/attributes/__init__.py\n@@ -85,7 +85,8 @@\n # use equality instead of isinstance as isinstance(True, int) evaluates to True\n elif element_type != sequence_first_valid_type:\n _logger.warning(\n- \"Mixed types %s and %s in attribute value sequence\",\n+ \"Attribute %r mixes types %s and %s in attribute value sequence\",\n+ key,\n sequence_first_valid_type.__name__,\n type(element).__name__,\n )\n", "issue": "Additional context for `Mixed types x and y in attribute value sequence` warnings\n**Is your feature request related to a problem?**\r\nI'm getting a whole lot of warnings like\r\n\r\n```\r\nopentelemetry.attributes.Mixed types str and int in attribute value sequence\r\n```\r\n\r\nBut they're missing context in order to resolve them. I'd like to know what attribute it is that has mixed types.\r\n\r\n**Describe the solution you'd like**\r\nInclude `key` value in the message to add some more context and give a better chance to resolve the warning.\r\n\r\n**Describe alternatives you've considered**\r\n\r\nNone\r\n\r\n**Additional context**\r\n\r\nLog is emitted from the lines below\r\n\r\nhttps://github.com/open-telemetry/opentelemetry-python/blob/e0e6a3a940c16c1df6493e258ccfbc57ac38cf96/opentelemetry-api/src/opentelemetry/attributes/__init__.py#L86-L91\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# type: ignore\n\nimport logging\nimport threading\nfrom collections import OrderedDict\nfrom collections.abc import MutableMapping\nfrom typing import Optional, Sequence, Union\n\nfrom opentelemetry.util import types\n\n# bytes are accepted as a user supplied value for attributes but\n# decoded to strings internally.\n_VALID_ATTR_VALUE_TYPES = (bool, str, bytes, int, float)\n\n\n_logger = logging.getLogger(__name__)\n\n\ndef _clean_attribute(\n key: str, value: types.AttributeValue, max_len: Optional[int]\n) -> Optional[types.AttributeValue]:\n \"\"\"Checks if attribute value is valid and cleans it if required.\n\n The function returns the cleaned value or None if the value is not valid.\n\n An attribute value is valid if it is either:\n - A primitive type: string, boolean, double precision floating\n point (IEEE 754-1985) or integer.\n - An array of primitive type values. The array MUST be homogeneous,\n i.e. it MUST NOT contain values of different types.\n\n An attribute needs cleansing if:\n - Its length is greater than the maximum allowed length.\n - It needs to be encoded/decoded e.g, bytes to strings.\n \"\"\"\n\n if not (key and isinstance(key, str)):\n _logger.warning(\"invalid key `%s`. 
must be non-empty string.\", key)\n return None\n\n if isinstance(value, _VALID_ATTR_VALUE_TYPES):\n return _clean_attribute_value(value, max_len)\n\n if isinstance(value, Sequence):\n sequence_first_valid_type = None\n cleaned_seq = []\n\n for element in value:\n element = _clean_attribute_value(element, max_len)\n if element is None:\n cleaned_seq.append(element)\n continue\n\n element_type = type(element)\n # Reject attribute value if sequence contains a value with an incompatible type.\n if element_type not in _VALID_ATTR_VALUE_TYPES:\n _logger.warning(\n \"Invalid type %s in attribute value sequence. Expected one of \"\n \"%s or None\",\n element_type.__name__,\n [\n valid_type.__name__\n for valid_type in _VALID_ATTR_VALUE_TYPES\n ],\n )\n return None\n\n # The type of the sequence must be homogeneous. The first non-None\n # element determines the type of the sequence\n if sequence_first_valid_type is None:\n sequence_first_valid_type = element_type\n # use equality instead of isinstance as isinstance(True, int) evaluates to True\n elif element_type != sequence_first_valid_type:\n _logger.warning(\n \"Mixed types %s and %s in attribute value sequence\",\n sequence_first_valid_type.__name__,\n type(element).__name__,\n )\n return None\n\n cleaned_seq.append(element)\n\n # Freeze mutable sequences defensively\n return tuple(cleaned_seq)\n\n _logger.warning(\n \"Invalid type %s for attribute '%s' value. Expected one of %s or a \"\n \"sequence of those types\",\n type(value).__name__,\n key,\n [valid_type.__name__ for valid_type in _VALID_ATTR_VALUE_TYPES],\n )\n return None\n\n\ndef _clean_attribute_value(\n value: types.AttributeValue, limit: Optional[int]\n) -> Union[types.AttributeValue, None]:\n if value is None:\n return None\n\n if isinstance(value, bytes):\n try:\n value = value.decode()\n except UnicodeDecodeError:\n _logger.warning(\"Byte attribute could not be decoded.\")\n return None\n\n if limit is not None and isinstance(value, str):\n value = value[:limit]\n return value\n\n\nclass BoundedAttributes(MutableMapping):\n \"\"\"An ordered dict with a fixed max capacity.\n\n Oldest elements are dropped when the dict is full and a new element is\n added.\n \"\"\"\n\n def __init__(\n self,\n maxlen: Optional[int] = None,\n attributes: types.Attributes = None,\n immutable: bool = True,\n max_value_len: Optional[int] = None,\n ):\n if maxlen is not None:\n if not isinstance(maxlen, int) or maxlen < 0:\n raise ValueError(\n \"maxlen must be valid int greater or equal to 0\"\n )\n self.maxlen = maxlen\n self.dropped = 0\n self.max_value_len = max_value_len\n self._dict = OrderedDict() # type: OrderedDict\n self._lock = threading.Lock() # type: threading.Lock\n if attributes:\n for key, value in attributes.items():\n self[key] = value\n self._immutable = immutable\n\n def __repr__(self):\n return (\n f\"{type(self).__name__}({dict(self._dict)}, maxlen={self.maxlen})\"\n )\n\n def __getitem__(self, key):\n return self._dict[key]\n\n def __setitem__(self, key, value):\n if getattr(self, \"_immutable\", False):\n raise TypeError\n with self._lock:\n if self.maxlen is not None and self.maxlen == 0:\n self.dropped += 1\n return\n\n value = _clean_attribute(key, value, self.max_value_len)\n if value is not None:\n if key in self._dict:\n del self._dict[key]\n elif (\n self.maxlen is not None and len(self._dict) == self.maxlen\n ):\n self._dict.popitem(last=False)\n self.dropped += 1\n\n self._dict[key] = value\n\n def __delitem__(self, key):\n if getattr(self, \"_immutable\", False):\n 
raise TypeError\n with self._lock:\n del self._dict[key]\n\n def __iter__(self):\n with self._lock:\n return iter(self._dict.copy())\n\n def __len__(self):\n return len(self._dict)\n\n def copy(self):\n return self._dict.copy()\n", "path": "opentelemetry-api/src/opentelemetry/attributes/__init__.py"}]} | 2,645 | 165 |
gh_patches_debug_7772 | rasdani/github-patches | git_diff | OctoPrint__OctoPrint-3054 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Software update fails on Debian testing/unstable
Tested with Octoprint 1.3.10
Right now, the software update will not work in Debian testing and unstable. The problem here is that Debian decided to name its Python version `2.7.15+` (yes, the '+' is part of the version string returned by `python --version`). OctoPrint's version compare cannot cope with this and sees it as < 2.7.9, which leads to a very confusing output of the software update component telling you why it doesn't want to update: `Python: 2.7.9 (you have: 2.7.15+)`. It took me some time to figure out what is actually going on.
There is already a bug report for Debian's python2.7 package here: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=914072
Sadly, there is no feedback from the Debian maintainers on why they named it this way or whether this might be changed again in the future.
</issue>
<code>
[start of src/octoprint/util/version.py]
1 # coding=utf-8
2 """
3 This module provides a bunch of utility methods and helpers for version handling.
4 """
5 from __future__ import absolute_import, division, print_function
6
7 __license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
8
9 import pkg_resources
10 import logging
11
12 from octoprint import __version__
13
14
15 def get_octoprint_version_string():
16 return __version__
17
18
19 def get_octoprint_version(base=False):
20 octoprint_version_string = get_octoprint_version_string()
21 return get_comparable_version(octoprint_version_string, base=base)
22
23
24 def is_released_octoprint_version(version=None):
25 """
26 >>> import pkg_resources
27 >>> is_released_octoprint_version(version=pkg_resources.parse_version("1.3.6rc3"))
28 True
29 >>> is_released_octoprint_version(version=pkg_resources.parse_version("1.3.6rc3.dev2+g1234"))
30 False
31 >>> is_released_octoprint_version(version=pkg_resources.parse_version("1.3.6"))
32 True
33 >>> is_released_octoprint_version(version=pkg_resources.parse_version("1.3.6.post1+g1234"))
34 True
35 >>> is_released_octoprint_version(version=pkg_resources.parse_version("1.3.6.post1.dev0+g1234"))
36 False
37 >>> is_released_octoprint_version(version=pkg_resources.parse_version("1.3.7.dev123+g23545"))
38 False
39 """
40
41 if version is None:
42 version = get_octoprint_version()
43
44 if isinstance(version, tuple):
45 # old setuptools
46 return "*@" not in version
47 else:
48 # new setuptools
49 return "dev" not in version.public
50
51
52 def is_stable_octoprint_version(version=None):
53 """
54 >>> import pkg_resources
55 >>> is_stable_octoprint_version(version=pkg_resources.parse_version("1.3.6rc3"))
56 False
57 >>> is_stable_octoprint_version(version=pkg_resources.parse_version("1.3.6rc3.dev2+g1234"))
58 False
59 >>> is_stable_octoprint_version(version=pkg_resources.parse_version("1.3.6"))
60 True
61 >>> is_stable_octoprint_version(version=pkg_resources.parse_version("1.3.6.post1+g1234"))
62 True
63 >>> is_stable_octoprint_version(version=pkg_resources.parse_version("1.3.6.post1.dev0+g1234"))
64 False
65 >>> is_stable_octoprint_version(version=pkg_resources.parse_version("1.3.7.dev123+g23545"))
66 False
67 """
68
69 if version is None:
70 version = get_octoprint_version()
71
72 if not is_released_octoprint_version(version=version):
73 return False
74
75 if isinstance(version, tuple):
76 return "*a" not in version and "*b" not in version and "*c" not in version
77 else:
78 return not version.is_prerelease
79
80
81 def is_octoprint_compatible(*compatibility_entries, **kwargs):
82 """
83 Tests if the current ``octoprint_version`` is compatible to any of the provided ``compatibility_entries``.
84
85 Arguments:
86 compatibility_entries (str): compatibility string(s) to test against, result will be `True` if any match
87 is found
88 octoprint_version (tuple or SetuptoolsVersion): optional OctoPrint version to match against, if not current
89 base version will be determined via :func:`get_octoprint_version`.
90
91 Returns:
92 (bool) ``True`` if any of the provided compatibility entries matches or there are no entries, else ``False``
93 """
94
95 logger = logging.getLogger(__name__)
96
97 if not compatibility_entries:
98 return True
99
100 octoprint_version = kwargs.get("octoprint_version")
101 if octoprint_version is None:
102 octoprint_version = get_octoprint_version(base=True)
103
104 for octo_compat in compatibility_entries:
105 try:
106 if not any(octo_compat.startswith(c) for c in ("<", "<=", "!=", "==", ">=", ">", "~=", "===")):
107 octo_compat = ">={}".format(octo_compat)
108
109 s = pkg_resources.Requirement.parse("OctoPrint" + octo_compat)
110 if octoprint_version in s:
111 break
112 except:
113 logger.exception("Something is wrong with this compatibility string for OctoPrint: {}".format(octo_compat))
114 else:
115 return False
116
117 return True
118
119
120 def get_comparable_version(version_string, base=False):
121 if "-" in version_string:
122 version_string = version_string[:version_string.find("-")]
123
124 version = pkg_resources.parse_version(version_string)
125
126 # A leading v is common in github release tags and old setuptools doesn't remove it.
127 if version and isinstance(version, tuple) and version[0].lower() == "*v":
128 version = version[1:]
129
130 if base:
131 if isinstance(version, tuple):
132 # old setuptools
133 base_version = []
134 for part in version:
135 if part.startswith("*"):
136 break
137 base_version.append(part)
138 base_version.append("*final")
139 version = tuple(base_version)
140 else:
141 # new setuptools
142 version = pkg_resources.parse_version(version.base_version)
143 return version
144
[end of src/octoprint/util/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/octoprint/util/version.py b/src/octoprint/util/version.py
--- a/src/octoprint/util/version.py
+++ b/src/octoprint/util/version.py
@@ -121,6 +121,10 @@
if "-" in version_string:
version_string = version_string[:version_string.find("-")]
+ # Debian has the python version set to 2.7.15+ which is not PEP440 compliant (bug 914072)
+ if version_string.endswith("+"):
+ version_string = version_string[:-1]
+
version = pkg_resources.parse_version(version_string)
# A leading v is common in github release tags and old setuptools doesn't remove it.
| {"golden_diff": "diff --git a/src/octoprint/util/version.py b/src/octoprint/util/version.py\n--- a/src/octoprint/util/version.py\n+++ b/src/octoprint/util/version.py\n@@ -121,6 +121,10 @@\n \tif \"-\" in version_string:\n \t\tversion_string = version_string[:version_string.find(\"-\")]\n \n+\t# Debian has the python version set to 2.7.15+ which is not PEP440 compliant (bug 914072)\n+\tif version_string.endswith(\"+\"):\n+\t\tversion_string = version_string[:-1]\n+\n \tversion = pkg_resources.parse_version(version_string)\n \n \t# A leading v is common in github release tags and old setuptools doesn't remove it.\n", "issue": "Software update fails on Debian testing/unstable\nTested with Octoprint 1.3.10\r\n\r\nRight now, the software update will not work in Debian testing and unstable. The problem here is that Debian decided to name its python version `2.7.15+` (yes the '+' is part of the version string returned by `python --version`. Octoprint's version compare cannot cope with this and sees this as < 2.7.9 (which leads to a very confusing output of the software update component telling you why it doesn't want to update: `Python: 2.7.9 (you have: 2.7.15+)` ... took me some time to figure out what is actually going on)\r\n\r\nThere is a bug report for Debian's python2.7 package already here https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=914072\r\nSadly there is no feedback from the Debian maintainers on why they named it this way and if this might be changed again in the future.\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"\nThis module provides a bunch of utility methods and helpers for version handling.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\n__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'\n\nimport pkg_resources\nimport logging\n\nfrom octoprint import __version__\n\n\ndef get_octoprint_version_string():\n\treturn __version__\n\n\ndef get_octoprint_version(base=False):\n\toctoprint_version_string = get_octoprint_version_string()\n\treturn get_comparable_version(octoprint_version_string, base=base)\n\n\ndef is_released_octoprint_version(version=None):\n\t\"\"\"\n\t>>> import pkg_resources\n\t>>> is_released_octoprint_version(version=pkg_resources.parse_version(\"1.3.6rc3\"))\n\tTrue\n\t>>> is_released_octoprint_version(version=pkg_resources.parse_version(\"1.3.6rc3.dev2+g1234\"))\n\tFalse\n\t>>> is_released_octoprint_version(version=pkg_resources.parse_version(\"1.3.6\"))\n\tTrue\n\t>>> is_released_octoprint_version(version=pkg_resources.parse_version(\"1.3.6.post1+g1234\"))\n\tTrue\n\t>>> is_released_octoprint_version(version=pkg_resources.parse_version(\"1.3.6.post1.dev0+g1234\"))\n\tFalse\n\t>>> is_released_octoprint_version(version=pkg_resources.parse_version(\"1.3.7.dev123+g23545\"))\n\tFalse\n\t\"\"\"\n\n\tif version is None:\n\t\tversion = get_octoprint_version()\n\n\tif isinstance(version, tuple):\n\t\t# old setuptools\n\t\treturn \"*@\" not in version\n\telse:\n\t\t# new setuptools\n\t\treturn \"dev\" not in version.public\n\n\ndef is_stable_octoprint_version(version=None):\n\t\"\"\"\n\t>>> import pkg_resources\n\t>>> is_stable_octoprint_version(version=pkg_resources.parse_version(\"1.3.6rc3\"))\n\tFalse\n\t>>> is_stable_octoprint_version(version=pkg_resources.parse_version(\"1.3.6rc3.dev2+g1234\"))\n\tFalse\n\t>>> is_stable_octoprint_version(version=pkg_resources.parse_version(\"1.3.6\"))\n\tTrue\n\t>>> 
is_stable_octoprint_version(version=pkg_resources.parse_version(\"1.3.6.post1+g1234\"))\n\tTrue\n\t>>> is_stable_octoprint_version(version=pkg_resources.parse_version(\"1.3.6.post1.dev0+g1234\"))\n\tFalse\n\t>>> is_stable_octoprint_version(version=pkg_resources.parse_version(\"1.3.7.dev123+g23545\"))\n\tFalse\n\t\"\"\"\n\n\tif version is None:\n\t\tversion = get_octoprint_version()\n\n\tif not is_released_octoprint_version(version=version):\n\t\treturn False\n\n\tif isinstance(version, tuple):\n\t\treturn \"*a\" not in version and \"*b\" not in version and \"*c\" not in version\n\telse:\n\t\treturn not version.is_prerelease\n\n\ndef is_octoprint_compatible(*compatibility_entries, **kwargs):\n\t\"\"\"\n\tTests if the current ``octoprint_version`` is compatible to any of the provided ``compatibility_entries``.\n\n\tArguments:\n\t\tcompatibility_entries (str): compatibility string(s) to test against, result will be `True` if any match\n\t\t\tis found\n\t\toctoprint_version (tuple or SetuptoolsVersion): optional OctoPrint version to match against, if not current\n\t\t\tbase version will be determined via :func:`get_octoprint_version`.\n\n\tReturns:\n\t\t(bool) ``True`` if any of the provided compatibility entries matches or there are no entries, else ``False``\n\t\"\"\"\n\n\tlogger = logging.getLogger(__name__)\n\n\tif not compatibility_entries:\n\t\treturn True\n\n\toctoprint_version = kwargs.get(\"octoprint_version\")\n\tif octoprint_version is None:\n\t\toctoprint_version = get_octoprint_version(base=True)\n\n\tfor octo_compat in compatibility_entries:\n\t\ttry:\n\t\t\tif not any(octo_compat.startswith(c) for c in (\"<\", \"<=\", \"!=\", \"==\", \">=\", \">\", \"~=\", \"===\")):\n\t\t\t\tocto_compat = \">={}\".format(octo_compat)\n\n\t\t\ts = pkg_resources.Requirement.parse(\"OctoPrint\" + octo_compat)\n\t\t\tif octoprint_version in s:\n\t\t\t\tbreak\n\t\texcept:\n\t\t\tlogger.exception(\"Something is wrong with this compatibility string for OctoPrint: {}\".format(octo_compat))\n\telse:\n\t\treturn False\n\n\treturn True\n\n\ndef get_comparable_version(version_string, base=False):\n\tif \"-\" in version_string:\n\t\tversion_string = version_string[:version_string.find(\"-\")]\n\n\tversion = pkg_resources.parse_version(version_string)\n\n\t# A leading v is common in github release tags and old setuptools doesn't remove it.\n\tif version and isinstance(version, tuple) and version[0].lower() == \"*v\":\n\t\tversion = version[1:]\n\n\tif base:\n\t\tif isinstance(version, tuple):\n\t\t\t# old setuptools\n\t\t\tbase_version = []\n\t\t\tfor part in version:\n\t\t\t\tif part.startswith(\"*\"):\n\t\t\t\t\tbreak\n\t\t\t\tbase_version.append(part)\n\t\t\tbase_version.append(\"*final\")\n\t\t\tversion = tuple(base_version)\n\t\telse:\n\t\t\t# new setuptools\n\t\t\tversion = pkg_resources.parse_version(version.base_version)\n\treturn version\n", "path": "src/octoprint/util/version.py"}]} | 2,322 | 161 |
gh_patches_debug_28067 | rasdani/github-patches | git_diff | mne-tools__mne-bids-pipeline-773 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add command line option for deriv_root
As I develop a Docker App using mne-bids-pipeline, I want a straightforward way to overwrite the bids_root and deriv_root values defined in the configuration file from the command line. This enhancement aims to simplify Docker volume mapping.
Currently, the option to override `bids_root` is already available ([source](https://github.com/mne-tools/mne-bids-pipeline/blob/ea95979fdb0ef807b3cd262da409cf010ada3da3/mne_bids_pipeline/_main.py#L56)), but there is no similar option for `deriv_root`.
I suggest adding a command-line option to the mne-bids-pipeline tool to allow users to specify the deriv_root directory. This behavior is similar to other BidsApps such as `FMRIprep` and `MRIQC`, where command-line arguments overwrite configuration files.
If you are interested, I would be happy to contribute by taking care of the pull request.
</issue>
<code>
[start of mne_bids_pipeline/_main.py]
1 import argparse
2 import pathlib
3 from textwrap import dedent
4 import time
5 from typing import List
6 from types import ModuleType, SimpleNamespace
7
8 import numpy as np
9
10 from ._config_utils import _get_step_modules
11 from ._config_import import _import_config
12 from ._config_template import create_template_config
13 from ._logging import logger, gen_log_kwargs
14 from ._parallel import get_parallel_backend
15 from ._run import _short_step_path
16
17
18 def main():
19 from . import __version__
20
21 parser = argparse.ArgumentParser()
22 parser.add_argument(
23 "--version", action="version", version=f"%(prog)s {__version__}"
24 )
25 parser.add_argument("config", nargs="?", default=None)
26 parser.add_argument(
27 "--config",
28 dest="config_switch",
29 default=None,
30 metavar="FILE",
31 help="The path of the pipeline configuration file to use.",
32 )
33 parser.add_argument(
34 "--create-config",
35 dest="create_config",
36 default=None,
37 metavar="FILE",
38 help="Create a template configuration file with the specified name. "
39 "If specified, all other parameters will be ignored.",
40 ),
41 parser.add_argument(
42 "--steps",
43 dest="steps",
44 default="all",
45 help=dedent(
46 """\
47 The processing steps to run.
48 Can either be one of the processing groups 'preprocessing', sensor',
49 'source', 'report', or 'all', or the name of a processing group plus
50 the desired step sans the step number and
51 filename extension, separated by a '/'. For example, to run ICA, you
52 would pass 'sensor/run_ica`. If unspecified, will run all processing
53 steps. Can also be a tuple of steps."""
54 ),
55 )
56 parser.add_argument(
57 "--root-dir",
58 dest="root_dir",
59 default=None,
60 help="BIDS root directory of the data to process.",
61 )
62 parser.add_argument(
63 "--subject", dest="subject", default=None, help="The subject to process."
64 )
65 parser.add_argument(
66 "--session", dest="session", default=None, help="The session to process."
67 )
68 parser.add_argument(
69 "--task", dest="task", default=None, help="The task to process."
70 )
71 parser.add_argument("--run", dest="run", default=None, help="The run to process.")
72 parser.add_argument(
73 "--n_jobs",
74 dest="n_jobs",
75 type=int,
76 default=None,
77 help="The number of parallel processes to execute.",
78 )
79 parser.add_argument(
80 "--interactive",
81 dest="interactive",
82 action="store_true",
83 help="Enable interactive mode.",
84 )
85 parser.add_argument(
86 "--debug", dest="debug", action="store_true", help="Enable debugging on error."
87 )
88 parser.add_argument(
89 "--no-cache",
90 dest="no_cache",
91 action="store_true",
92 help="Disable caching of intermediate results.",
93 )
94 options = parser.parse_args()
95
96 if options.create_config is not None:
97 target_path = pathlib.Path(options.create_config)
98 create_template_config(target_path=target_path, overwrite=False)
99 return
100
101 config = options.config
102 config_switch = options.config_switch
103 bad = False
104 if config is None:
105 if config_switch is None:
106 bad = "neither was provided"
107 else:
108 config = config_switch
109 elif config_switch is not None:
110 bad = "both were provided"
111 if bad:
112 parser.error(
113 "❌ You must specify a configuration file either as a single "
114 f"argument or with --config, but {bad}."
115 )
116 steps = options.steps
117 root_dir = options.root_dir
118 subject, session = options.subject, options.session
119 task, run = options.task, options.run
120 n_jobs = options.n_jobs
121 interactive, debug = options.interactive, options.debug
122 cache = not options.no_cache
123
124 if isinstance(steps, str) and "," in steps:
125 # Work around limitation in Fire: --steps=foo,bar/baz won't produce a
126 # tuple ('foo', 'bar/baz'), but a string 'foo,bar/baz'.
127 steps = tuple(steps.split(","))
128 elif isinstance(steps, str):
129 steps = (steps,)
130
131 on_error = "debug" if debug else None
132 cache = "1" if cache else "0"
133
134 processing_stages = []
135 processing_steps = []
136 for steps_ in steps:
137 if "/" in steps_:
138 stage, step = steps_.split("/")
139 processing_stages.append(stage)
140 processing_steps.append(step)
141 else:
142 # User specified "sensor", "preprocessing" or similar, but without
143 # any further grouping.
144 processing_stages.append(steps_)
145 processing_steps.append(None)
146
147 config_path = pathlib.Path(config).expanduser().resolve(strict=True)
148 overrides = SimpleNamespace()
149 if root_dir:
150 overrides.bids_root = pathlib.Path(root_dir).expanduser().resolve(strict=True)
151 if subject:
152 overrides.subjects = [subject]
153 if session:
154 overrides.sessions = [session]
155 if task:
156 overrides.task = task
157 if run:
158 overrides.runs = run
159 if interactive:
160 overrides.interactive = interactive
161 if n_jobs:
162 overrides.n_jobs = int(n_jobs)
163 if on_error:
164 overrides.on_error = on_error
165 if not cache:
166 overrides.memory_location = False
167
168 step_modules: List[ModuleType] = []
169 STEP_MODULES = _get_step_modules()
170 for stage, step in zip(processing_stages, processing_steps):
171 if stage not in STEP_MODULES.keys():
172 raise ValueError(
173 f"Invalid step requested: '{stage}'. "
174 f"It should be one of {list(STEP_MODULES.keys())}."
175 )
176
177 if step is None:
178 # User specified `sensors`, `source`, or similar
179 step_modules.extend(STEP_MODULES[stage])
180 else:
181 # User specified 'stage/step'
182 for step_module in STEP_MODULES[stage]:
183 step_name = pathlib.Path(step_module.__file__).name
184 if step in step_name:
185 step_modules.append(step_module)
186 break
187 else:
188 # We've iterated over all steps, but none matched!
189 raise ValueError(f"Invalid steps requested: {stage}/{step}")
190
191 if processing_stages[0] != "all":
192 # Always run the directory initialization steps, but skip for 'all',
193 # because it already includes them – and we want to avoid running
194 # them twice.
195 step_modules = [*STEP_MODULES["init"], *step_modules]
196
197 logger.title("Welcome aboard MNE-BIDS-Pipeline! 👋")
198 msg = f"Using configuration: {config}"
199 __mne_bids_pipeline_step__ = pathlib.Path(__file__) # used for logging
200 logger.info(**gen_log_kwargs(message=msg, emoji="📝"))
201 config_imported = _import_config(
202 config_path=config_path,
203 overrides=overrides,
204 )
205 # Initialize dask now
206 with get_parallel_backend(config_imported.exec_params):
207 pass
208 del __mne_bids_pipeline_step__
209 logger.end()
210
211 for step_module in step_modules:
212 start = time.time()
213 step = _short_step_path(pathlib.Path(step_module.__file__))
214 logger.title(title=f"{step}")
215 step_module.main(config=config_imported)
216 elapsed = time.time() - start
217 hours, remainder = divmod(elapsed, 3600)
218 hours = int(hours)
219 minutes, seconds = divmod(remainder, 60)
220 minutes = int(minutes)
221 seconds = int(np.ceil(seconds)) # always take full seconds
222 elapsed = f"{seconds}s"
223 if minutes:
224 elapsed = f"{minutes}m {elapsed}"
225 if hours:
226 elapsed = f"{hours}h {elapsed}"
227 logger.end(f"done ({elapsed})")
228
[end of mne_bids_pipeline/_main.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mne_bids_pipeline/_main.py b/mne_bids_pipeline/_main.py
--- a/mne_bids_pipeline/_main.py
+++ b/mne_bids_pipeline/_main.py
@@ -59,6 +59,18 @@
default=None,
help="BIDS root directory of the data to process.",
)
+ parser.add_argument(
+ "--deriv_root",
+ dest="deriv_root",
+ default=None,
+ help=dedent(
+ """\
+ The root of the derivatives directory
+ in which the pipeline will store the processing results.
+ If unspecified, this will be derivatives/mne-bids-pipeline
+ inside the BIDS root."""
+ ),
+ ),
parser.add_argument(
"--subject", dest="subject", default=None, help="The subject to process."
)
@@ -115,6 +127,7 @@
)
steps = options.steps
root_dir = options.root_dir
+ deriv_root = options.deriv_root
subject, session = options.subject, options.session
task, run = options.task, options.run
n_jobs = options.n_jobs
@@ -148,6 +161,10 @@
overrides = SimpleNamespace()
if root_dir:
overrides.bids_root = pathlib.Path(root_dir).expanduser().resolve(strict=True)
+ if deriv_root:
+ overrides.deriv_root = (
+ pathlib.Path(deriv_root).expanduser().resolve(strict=False)
+ )
if subject:
overrides.subjects = [subject]
if session:
| {"golden_diff": "diff --git a/mne_bids_pipeline/_main.py b/mne_bids_pipeline/_main.py\n--- a/mne_bids_pipeline/_main.py\n+++ b/mne_bids_pipeline/_main.py\n@@ -59,6 +59,18 @@\n default=None,\n help=\"BIDS root directory of the data to process.\",\n )\n+ parser.add_argument(\n+ \"--deriv_root\",\n+ dest=\"deriv_root\",\n+ default=None,\n+ help=dedent(\n+ \"\"\"\\\n+ The root of the derivatives directory\n+ in which the pipeline will store the processing results.\n+ If unspecified, this will be derivatives/mne-bids-pipeline\n+ inside the BIDS root.\"\"\"\n+ ),\n+ ),\n parser.add_argument(\n \"--subject\", dest=\"subject\", default=None, help=\"The subject to process.\"\n )\n@@ -115,6 +127,7 @@\n )\n steps = options.steps\n root_dir = options.root_dir\n+ deriv_root = options.deriv_root\n subject, session = options.subject, options.session\n task, run = options.task, options.run\n n_jobs = options.n_jobs\n@@ -148,6 +161,10 @@\n overrides = SimpleNamespace()\n if root_dir:\n overrides.bids_root = pathlib.Path(root_dir).expanduser().resolve(strict=True)\n+ if deriv_root:\n+ overrides.deriv_root = (\n+ pathlib.Path(deriv_root).expanduser().resolve(strict=False)\n+ )\n if subject:\n overrides.subjects = [subject]\n if session:\n", "issue": "Add command line option for deriv_root\nAs I develop a Docker App using mne-bids-pipeline, I want a straightforward way to overwrite the bids_root and deriv_root values defined in the configuration file from the command line. This enhancement aims to simplify Docker volume mapping.\r\n\r\nCurrently, the option to override `bids_root `is already available ([source](https://github.com/mne-tools/mne-bids-pipeline/blob/ea95979fdb0ef807b3cd262da409cf010ada3da3/mne_bids_pipeline/_main.py#L56)), but there is no similar option for `deriv_root`.\r\n\r\nI suggest adding a command-line option to the mne-bids-pipeline tool to allow users to specify the deriv_root directory. This behavior is similar to other BidsApps such as `FMRIprep `and `MRIQC`, where command line arguments overwrite configuration files.\r\n\r\nIf you are interested, I would be happy to contribute by taking care of the pull request.\n", "before_files": [{"content": "import argparse\nimport pathlib\nfrom textwrap import dedent\nimport time\nfrom typing import List\nfrom types import ModuleType, SimpleNamespace\n\nimport numpy as np\n\nfrom ._config_utils import _get_step_modules\nfrom ._config_import import _import_config\nfrom ._config_template import create_template_config\nfrom ._logging import logger, gen_log_kwargs\nfrom ._parallel import get_parallel_backend\nfrom ._run import _short_step_path\n\n\ndef main():\n from . import __version__\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--version\", action=\"version\", version=f\"%(prog)s {__version__}\"\n )\n parser.add_argument(\"config\", nargs=\"?\", default=None)\n parser.add_argument(\n \"--config\",\n dest=\"config_switch\",\n default=None,\n metavar=\"FILE\",\n help=\"The path of the pipeline configuration file to use.\",\n )\n parser.add_argument(\n \"--create-config\",\n dest=\"create_config\",\n default=None,\n metavar=\"FILE\",\n help=\"Create a template configuration file with the specified name. 
\"\n \"If specified, all other parameters will be ignored.\",\n ),\n parser.add_argument(\n \"--steps\",\n dest=\"steps\",\n default=\"all\",\n help=dedent(\n \"\"\"\\\n The processing steps to run.\n Can either be one of the processing groups 'preprocessing', sensor',\n 'source', 'report', or 'all', or the name of a processing group plus\n the desired step sans the step number and\n filename extension, separated by a '/'. For example, to run ICA, you\n would pass 'sensor/run_ica`. If unspecified, will run all processing\n steps. Can also be a tuple of steps.\"\"\"\n ),\n )\n parser.add_argument(\n \"--root-dir\",\n dest=\"root_dir\",\n default=None,\n help=\"BIDS root directory of the data to process.\",\n )\n parser.add_argument(\n \"--subject\", dest=\"subject\", default=None, help=\"The subject to process.\"\n )\n parser.add_argument(\n \"--session\", dest=\"session\", default=None, help=\"The session to process.\"\n )\n parser.add_argument(\n \"--task\", dest=\"task\", default=None, help=\"The task to process.\"\n )\n parser.add_argument(\"--run\", dest=\"run\", default=None, help=\"The run to process.\")\n parser.add_argument(\n \"--n_jobs\",\n dest=\"n_jobs\",\n type=int,\n default=None,\n help=\"The number of parallel processes to execute.\",\n )\n parser.add_argument(\n \"--interactive\",\n dest=\"interactive\",\n action=\"store_true\",\n help=\"Enable interactive mode.\",\n )\n parser.add_argument(\n \"--debug\", dest=\"debug\", action=\"store_true\", help=\"Enable debugging on error.\"\n )\n parser.add_argument(\n \"--no-cache\",\n dest=\"no_cache\",\n action=\"store_true\",\n help=\"Disable caching of intermediate results.\",\n )\n options = parser.parse_args()\n\n if options.create_config is not None:\n target_path = pathlib.Path(options.create_config)\n create_template_config(target_path=target_path, overwrite=False)\n return\n\n config = options.config\n config_switch = options.config_switch\n bad = False\n if config is None:\n if config_switch is None:\n bad = \"neither was provided\"\n else:\n config = config_switch\n elif config_switch is not None:\n bad = \"both were provided\"\n if bad:\n parser.error(\n \"\u274c You must specify a configuration file either as a single \"\n f\"argument or with --config, but {bad}.\"\n )\n steps = options.steps\n root_dir = options.root_dir\n subject, session = options.subject, options.session\n task, run = options.task, options.run\n n_jobs = options.n_jobs\n interactive, debug = options.interactive, options.debug\n cache = not options.no_cache\n\n if isinstance(steps, str) and \",\" in steps:\n # Work around limitation in Fire: --steps=foo,bar/baz won't produce a\n # tuple ('foo', 'bar/baz'), but a string 'foo,bar/baz'.\n steps = tuple(steps.split(\",\"))\n elif isinstance(steps, str):\n steps = (steps,)\n\n on_error = \"debug\" if debug else None\n cache = \"1\" if cache else \"0\"\n\n processing_stages = []\n processing_steps = []\n for steps_ in steps:\n if \"/\" in steps_:\n stage, step = steps_.split(\"/\")\n processing_stages.append(stage)\n processing_steps.append(step)\n else:\n # User specified \"sensor\", \"preprocessing\" or similar, but without\n # any further grouping.\n processing_stages.append(steps_)\n processing_steps.append(None)\n\n config_path = pathlib.Path(config).expanduser().resolve(strict=True)\n overrides = SimpleNamespace()\n if root_dir:\n overrides.bids_root = pathlib.Path(root_dir).expanduser().resolve(strict=True)\n if subject:\n overrides.subjects = [subject]\n if session:\n overrides.sessions = 
[session]\n if task:\n overrides.task = task\n if run:\n overrides.runs = run\n if interactive:\n overrides.interactive = interactive\n if n_jobs:\n overrides.n_jobs = int(n_jobs)\n if on_error:\n overrides.on_error = on_error\n if not cache:\n overrides.memory_location = False\n\n step_modules: List[ModuleType] = []\n STEP_MODULES = _get_step_modules()\n for stage, step in zip(processing_stages, processing_steps):\n if stage not in STEP_MODULES.keys():\n raise ValueError(\n f\"Invalid step requested: '{stage}'. \"\n f\"It should be one of {list(STEP_MODULES.keys())}.\"\n )\n\n if step is None:\n # User specified `sensors`, `source`, or similar\n step_modules.extend(STEP_MODULES[stage])\n else:\n # User specified 'stage/step'\n for step_module in STEP_MODULES[stage]:\n step_name = pathlib.Path(step_module.__file__).name\n if step in step_name:\n step_modules.append(step_module)\n break\n else:\n # We've iterated over all steps, but none matched!\n raise ValueError(f\"Invalid steps requested: {stage}/{step}\")\n\n if processing_stages[0] != \"all\":\n # Always run the directory initialization steps, but skip for 'all',\n # because it already includes them \u2013 and we want to avoid running\n # them twice.\n step_modules = [*STEP_MODULES[\"init\"], *step_modules]\n\n logger.title(\"Welcome aboard MNE-BIDS-Pipeline! \ud83d\udc4b\")\n msg = f\"Using configuration: {config}\"\n __mne_bids_pipeline_step__ = pathlib.Path(__file__) # used for logging\n logger.info(**gen_log_kwargs(message=msg, emoji=\"\ud83d\udcdd\"))\n config_imported = _import_config(\n config_path=config_path,\n overrides=overrides,\n )\n # Initialize dask now\n with get_parallel_backend(config_imported.exec_params):\n pass\n del __mne_bids_pipeline_step__\n logger.end()\n\n for step_module in step_modules:\n start = time.time()\n step = _short_step_path(pathlib.Path(step_module.__file__))\n logger.title(title=f\"{step}\")\n step_module.main(config=config_imported)\n elapsed = time.time() - start\n hours, remainder = divmod(elapsed, 3600)\n hours = int(hours)\n minutes, seconds = divmod(remainder, 60)\n minutes = int(minutes)\n seconds = int(np.ceil(seconds)) # always take full seconds\n elapsed = f\"{seconds}s\"\n if minutes:\n elapsed = f\"{minutes}m {elapsed}\"\n if hours:\n elapsed = f\"{hours}h {elapsed}\"\n logger.end(f\"done ({elapsed})\")\n", "path": "mne_bids_pipeline/_main.py"}]} | 3,037 | 349 |
gh_patches_debug_4319 | rasdani/github-patches | git_diff | electricitymaps__electricitymaps-contrib-5651 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CA-SK production parser down
## Description
This is an automatic error report generated for Canada Saskatchewan (CA-SK).
Issues:
- No recent data found for `production` parser
- No recent data found for `consumption` parser
## Suggestions
- Try running the parser locally using the command `poetry run test_parser CA-SK production`
- <a href="https://storage.googleapis.com/electricitymap-parser-logs/CA-SK.html">Explore the runtime logs</a>
You can see an overview of all parser issues [here](https://github.com/tmrowco/electricitymap-contrib/wiki/Parser-issues).
</issue>
<code>
[start of parsers/CA_SK.py]
1 from datetime import datetime, timedelta
2 from logging import Logger, getLogger
3 from typing import List, Optional
4
5 from pytz import timezone
6 from requests import Response, Session
7
8 from parsers.lib.exceptions import ParserException
9
10 TIMEZONE = timezone("America/Regina")
11
12 # URLs for the different endpoints.
13 PRODUCTION_URL = (
14 "https://www.saskpower.com/ignitionapi/PowerUseDashboard/GetPowerUseDashboardData"
15 )
16 CONSUMPTION_URL = "https://www.saskpower.com/ignitionapi/Content/GetNetLoad"
17
18 PRODUCTION_MAPPING = {
19 "Hydro": "hydro",
20 "Wind": "wind",
21 "Solar": "solar",
22 "Natural Gas": "gas",
23 "Coal": "coal",
24 "Other": "unknown", # This is internal consumption, losses, heat recovery facilities and small independent power producers.
25 }
26
27 USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36"
28
29
30 def validate_zone_key(zone_key: str) -> None:
31 if zone_key != "CA-SK":
32 raise ParserException(
33 "CA_SK.py",
34 f"CA_SK.py is not designed to parse zone_key: {zone_key}.",
35 zone_key,
36 )
37
38
39 def validate_no_datetime(target_datetime: Optional[datetime], zone_key) -> None:
40 if target_datetime:
41 raise ParserException(
42 "CA_SK.py",
43 "This parser is unable to fetch historical data.",
44 zone_key,
45 )
46
47
48 def fetch_production(
49 zone_key: str = "CA-SK",
50 session: Optional[Session] = None,
51 target_datetime: Optional[datetime] = None,
52 logger: Logger = getLogger(__name__),
53 ):
54 """This parser function will currently return the daily average of the day in question as hourly data.
55 This is because the API only returns daily data but the backend expects hourly values.
56 This is in order to facilitate the estimation of the hourly values from the daily average.
57 """
58 # Validate that the zone key is equal to CA-SK.
59 validate_zone_key(zone_key)
60 # Validate that the target_datetime is None as this parser is unable to fetch historical data.
61 validate_no_datetime(target_datetime, zone_key)
62
63 session = session or Session()
64
65 # Set the headers to mimic a user browser as the API will return a 403 if not.
66 headers = {"user-agent": USER_AGENT}
67 response: Response = session.get(PRODUCTION_URL, headers=headers)
68
69 if not response.ok:
70 raise ParserException(
71 "CA_SK.py",
72 f"Failed to fetch production data. Response Code: {response.status_code}\nError:\n{response.text}",
73 zone_key,
74 )
75
76 raw_data = response.json()
77 # Date is in the format "Jan 01, 2020"
78 raw_date = raw_data["SupplyDataText"]
79 date = datetime.strptime(raw_date, "%b %d, %Y")
80 production_data = {}
81
82 for value in raw_data["PowerCacheData"]["generationByType"]:
83 production_data[PRODUCTION_MAPPING[value["type"]]] = value[
84 "totalGenerationForType"
85 ]
86
87 data_list: List[dict] = []
88 # Hack to return hourly data from daily data for the backend as it expects hourly data.
89 for hour in range(0, 24):
90 data_list.append(
91 {
92 "zoneKey": zone_key,
93 "datetime": date.replace(hour=hour, tzinfo=TIMEZONE),
94 "production": production_data,
95 "source": "saskpower.com",
96 }
97 )
98
99 return data_list
100
101
102 def fetch_consumption(
103 zone_key: str = "CA-SK",
104 session: Optional[Session] = None,
105 target_datetime: Optional[datetime] = None,
106 logger: Logger = getLogger(__name__),
107 ):
108 # Validate that the zone key is equal to CA-SK.
109 validate_zone_key(zone_key)
110 # Validate that the target_datetime is None as this parser is unable to fetch historical data.
111 validate_no_datetime(target_datetime, zone_key)
112
113 session = session or Session()
114
115 # Set the headers to mimic a user browser as the API will return a 403 if not.
116 headers = {"user-agent": USER_AGENT}
117
118 response: Response = session.get(CONSUMPTION_URL) # , headers=headers)
119
120 if not response.ok:
121 raise ParserException(
122 "CA_SK.py",
123 f"Failed to fetch consumption data. Response Code: {response.status_code}\nError:\n{response.text}",
124 zone_key,
125 )
126
127 raw_data = response.json()
128
129 now = datetime.now(TIMEZONE)
130
131 # Data is updated every 5 minutes so we assume the data is from a multiple of 5 minutes and has a 5 minute delay from that multiple.
132 assumed_datetime = now.replace(second=0, microsecond=0) - timedelta(
133 minutes=(now.minute % 5) + 5
134 )
135
136 return [
137 {
138 "zoneKey": zone_key,
139 "datetime": assumed_datetime,
140 "consumption": int(raw_data),
141 "source": "saskpower.com",
142 }
143 ]
144
[end of parsers/CA_SK.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/parsers/CA_SK.py b/parsers/CA_SK.py
--- a/parsers/CA_SK.py
+++ b/parsers/CA_SK.py
@@ -115,7 +115,7 @@
# Set the headers to mimic a user browser as the API will return a 403 if not.
headers = {"user-agent": USER_AGENT}
- response: Response = session.get(CONSUMPTION_URL) # , headers=headers)
+ response: Response = session.get(CONSUMPTION_URL, headers=headers)
if not response.ok:
raise ParserException(
| {"golden_diff": "diff --git a/parsers/CA_SK.py b/parsers/CA_SK.py\n--- a/parsers/CA_SK.py\n+++ b/parsers/CA_SK.py\n@@ -115,7 +115,7 @@\n # Set the headers to mimic a user browser as the API will return a 403 if not.\n headers = {\"user-agent\": USER_AGENT}\n \n- response: Response = session.get(CONSUMPTION_URL) # , headers=headers)\n+ response: Response = session.get(CONSUMPTION_URL, headers=headers)\n \n if not response.ok:\n raise ParserException(\n", "issue": "CA-SK production parser down\n## Description\n\nThis is an automatic error report generated for Canada Saskatchewan (CA-SK).\n\nIssues:\n- No recent data found for `production` parser\n- No recent data found for `consumption` parser\n\n## Suggestions\n- Try running the parser locally using the command `poetry run test_parser CA-SK production`\n- <a href=\"https://storage.googleapis.com/electricitymap-parser-logs/CA-SK.html\">Explore the runtime logs</a>\n\nYou can see an overview of all parser issues [here](https://github.com/tmrowco/electricitymap-contrib/wiki/Parser-issues).\n\n", "before_files": [{"content": "from datetime import datetime, timedelta\nfrom logging import Logger, getLogger\nfrom typing import List, Optional\n\nfrom pytz import timezone\nfrom requests import Response, Session\n\nfrom parsers.lib.exceptions import ParserException\n\nTIMEZONE = timezone(\"America/Regina\")\n\n# URLs for the different endpoints.\nPRODUCTION_URL = (\n \"https://www.saskpower.com/ignitionapi/PowerUseDashboard/GetPowerUseDashboardData\"\n)\nCONSUMPTION_URL = \"https://www.saskpower.com/ignitionapi/Content/GetNetLoad\"\n\nPRODUCTION_MAPPING = {\n \"Hydro\": \"hydro\",\n \"Wind\": \"wind\",\n \"Solar\": \"solar\",\n \"Natural Gas\": \"gas\",\n \"Coal\": \"coal\",\n \"Other\": \"unknown\", # This is internal consumption, losses, heat recovery facilities and small independent power producers.\n}\n\nUSER_AGENT = \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36\"\n\n\ndef validate_zone_key(zone_key: str) -> None:\n if zone_key != \"CA-SK\":\n raise ParserException(\n \"CA_SK.py\",\n f\"CA_SK.py is not designed to parse zone_key: {zone_key}.\",\n zone_key,\n )\n\n\ndef validate_no_datetime(target_datetime: Optional[datetime], zone_key) -> None:\n if target_datetime:\n raise ParserException(\n \"CA_SK.py\",\n \"This parser is unable to fetch historical data.\",\n zone_key,\n )\n\n\ndef fetch_production(\n zone_key: str = \"CA-SK\",\n session: Optional[Session] = None,\n target_datetime: Optional[datetime] = None,\n logger: Logger = getLogger(__name__),\n):\n \"\"\"This parser function will currently return the daily average of the day in question as hourly data.\n This is because the API only returns daily data but the backend expects hourly values.\n This is in order to facilitate the estimation of the hourly values from the daily average.\n \"\"\"\n # Validate that the zone key is equal to CA-SK.\n validate_zone_key(zone_key)\n # Validate that the target_datetime is None as this parser is unable to fetch historical data.\n validate_no_datetime(target_datetime, zone_key)\n\n session = session or Session()\n\n # Set the headers to mimic a user browser as the API will return a 403 if not.\n headers = {\"user-agent\": USER_AGENT}\n response: Response = session.get(PRODUCTION_URL, headers=headers)\n\n if not response.ok:\n raise ParserException(\n \"CA_SK.py\",\n f\"Failed to fetch production data. 
Response Code: {response.status_code}\\nError:\\n{response.text}\",\n zone_key,\n )\n\n raw_data = response.json()\n # Date is in the format \"Jan 01, 2020\"\n raw_date = raw_data[\"SupplyDataText\"]\n date = datetime.strptime(raw_date, \"%b %d, %Y\")\n production_data = {}\n\n for value in raw_data[\"PowerCacheData\"][\"generationByType\"]:\n production_data[PRODUCTION_MAPPING[value[\"type\"]]] = value[\n \"totalGenerationForType\"\n ]\n\n data_list: List[dict] = []\n # Hack to return hourly data from daily data for the backend as it expects hourly data.\n for hour in range(0, 24):\n data_list.append(\n {\n \"zoneKey\": zone_key,\n \"datetime\": date.replace(hour=hour, tzinfo=TIMEZONE),\n \"production\": production_data,\n \"source\": \"saskpower.com\",\n }\n )\n\n return data_list\n\n\ndef fetch_consumption(\n zone_key: str = \"CA-SK\",\n session: Optional[Session] = None,\n target_datetime: Optional[datetime] = None,\n logger: Logger = getLogger(__name__),\n):\n # Validate that the zone key is equal to CA-SK.\n validate_zone_key(zone_key)\n # Validate that the target_datetime is None as this parser is unable to fetch historical data.\n validate_no_datetime(target_datetime, zone_key)\n\n session = session or Session()\n\n # Set the headers to mimic a user browser as the API will return a 403 if not.\n headers = {\"user-agent\": USER_AGENT}\n\n response: Response = session.get(CONSUMPTION_URL) # , headers=headers)\n\n if not response.ok:\n raise ParserException(\n \"CA_SK.py\",\n f\"Failed to fetch consumption data. Response Code: {response.status_code}\\nError:\\n{response.text}\",\n zone_key,\n )\n\n raw_data = response.json()\n\n now = datetime.now(TIMEZONE)\n\n # Data is updated every 5 minutes so we assume the data is from a multiple of 5 minutes and has a 5 minute delay from that multiple.\n assumed_datetime = now.replace(second=0, microsecond=0) - timedelta(\n minutes=(now.minute % 5) + 5\n )\n\n return [\n {\n \"zoneKey\": zone_key,\n \"datetime\": assumed_datetime,\n \"consumption\": int(raw_data),\n \"source\": \"saskpower.com\",\n }\n ]\n", "path": "parsers/CA_SK.py"}]} | 2,156 | 131 |
gh_patches_debug_18454 | rasdani/github-patches | git_diff | napalm-automation__napalm-514 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[2.0] "pip3 install napalm" doesn't install requirements
Debian 9.2 (Stretch) with python3 v3.5.3, pip3 v9.0.1
With v1.2.0, `pip3 install napalm==1.2.0` also installs the required modules (MarkupSafe, jinja2, netaddr, pyYAML, pyeapi, future, pynacl, bcrypt, paramiko, pyFG, scp, netmiko, lxml, pyIOSXR, ncclient, pyserial, junos-eznc, urllib3, idna, certifi, chardet, requests, pynxos, pan-python, requests-toolbelt, xmltodict, pyPluribus, chainmap, librouteros, vyattaconfparser).
With Napalm v2.0.0, no required module is installed with `pip3 install napalm`, so napalm won't work.
</issue>
<code>
[start of setup.py]
1 """setup.py file."""
2 import uuid
3 import os
4
5 from distutils.core import Command
6 from setuptools import setup, find_packages
7 from setuptools.command import install
8
9
10 from pip.req import parse_requirements
11
12 import pip
13 import sys
14
15 __author__ = 'David Barroso <[email protected]>'
16
17 # Read SUPPORTED_DRIVERS from file (without importing)
18 _locals = {}
19 filename = os.path.join('napalm', '_SUPPORTED_DRIVERS.py')
20 with open(filename) as supported:
21 exec(supported.read(), None, _locals)
22 SUPPORTED_DRIVERS = _locals['SUPPORTED_DRIVERS']
23
24
25 def process_requirements(dep):
26 print("PROCESSING DEPENDENCIES FOR {}".format(dep))
27 u = uuid.uuid1()
28 iter_reqs = parse_requirements("requirements/{}".format(dep), session=u)
29 [pip.main(['install', (str(ir.req))]) for ir in iter_reqs]
30
31
32 def custom_command_driver(driver):
33 class CustomCommand(Command):
34 """A custom command to run Pylint on all Python source files."""
35 user_options = []
36
37 def initialize_options(self):
38 pass
39
40 def finalize_options(self):
41 pass
42
43 def run(self):
44 """Run command."""
45 process_requirements(driver)
46
47 return CustomCommand
48
49
50 class CustomInstall(install.install):
51 """A custom command to run Pylint on all Python source files."""
52
53 def run(self):
54 """Run command."""
55 if any([d in sys.argv for d in SUPPORTED_DRIVERS]):
56 process_requirements('base')
57 else:
58 process_requirements('all')
59 install.install.run(self)
60
61
62 custom_commands = {d: custom_command_driver(d) for d in SUPPORTED_DRIVERS}
63 custom_commands['install'] = CustomInstall
64
65 setup(
66 cmdclass=custom_commands,
67 name="napalm",
68 version='2.0.0',
69 packages=find_packages(exclude=("test*", )),
70 test_suite='test_base',
71 author="David Barroso, Kirk Byers, Mircea Ulinic",
72 author_email="[email protected], [email protected], [email protected]",
73 description="Network Automation and Programmability Abstraction Layer with Multivendor support",
74 classifiers=[
75 'Topic :: Utilities',
76 'Programming Language :: Python',
77 'Programming Language :: Python :: 2',
78 'Programming Language :: Python :: 2.7',
79 'Programming Language :: Python :: 3',
80 'Programming Language :: Python :: 3.4',
81 'Programming Language :: Python :: 3.5',
82 'Programming Language :: Python :: 3.6',
83 'Operating System :: POSIX :: Linux',
84 'Operating System :: MacOS',
85 ],
86 url="https://github.com/napalm-automation/napalm",
87 include_package_data=True,
88 install_requires=[],
89 entry_points={
90 'console_scripts': [
91 'cl_napalm_configure=napalm.base.clitools.cl_napalm_configure:main',
92 'cl_napalm_test=napalm.base.clitools.cl_napalm_test:main',
93 'cl_napalm_validate=napalm.base.clitools.cl_napalm_validate:main',
94 'napalm=napalm.base.clitools.cl_napalm:main',
95 ],
96 }
97 )
98
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -5,11 +5,12 @@
from distutils.core import Command
from setuptools import setup, find_packages
from setuptools.command import install
+from subprocess import check_call
from pip.req import parse_requirements
-import pip
+import pip # noqa: test pip is installed
import sys
__author__ = 'David Barroso <[email protected]>'
@@ -26,7 +27,9 @@
print("PROCESSING DEPENDENCIES FOR {}".format(dep))
u = uuid.uuid1()
iter_reqs = parse_requirements("requirements/{}".format(dep), session=u)
- [pip.main(['install', (str(ir.req))]) for ir in iter_reqs]
+
+ for ir in iter_reqs:
+ check_call([sys.executable, '-m', 'pip', 'install', str(ir.req)])
def custom_command_driver(driver):
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -5,11 +5,12 @@\n from distutils.core import Command\n from setuptools import setup, find_packages\n from setuptools.command import install\n+from subprocess import check_call\n \n \n from pip.req import parse_requirements\n \n-import pip\n+import pip # noqa: test pip is installed\n import sys\n \n __author__ = 'David Barroso <[email protected]>'\n@@ -26,7 +27,9 @@\n print(\"PROCESSING DEPENDENCIES FOR {}\".format(dep))\n u = uuid.uuid1()\n iter_reqs = parse_requirements(\"requirements/{}\".format(dep), session=u)\n- [pip.main(['install', (str(ir.req))]) for ir in iter_reqs]\n+\n+ for ir in iter_reqs:\n+ check_call([sys.executable, '-m', 'pip', 'install', str(ir.req)])\n \n \n def custom_command_driver(driver):\n", "issue": "[2.0] \"pip3 install napalm\" doesn't install requirements\nDebian 9.2 (Stretch) with python3 v3.5.3, pip3 v9.0.1\r\n\r\nWith v1.2.0 a `pip3 install napalm==1.2.0` installs also the required modules (MarkupSafe, jinja2, netaddr, pyYAML, pyeapi, future, pynacl, bcrypt, paramiko, pyFG, scp, netmiko, lxml, pyIOSXR, ncclient, pyserial, junos-eznc, urllib3, idna, certifi, chardet, requests, pynxos, pan-python, requests-toolbelt, xmltodict, pyPluribus, chainmap, librouteros, vyattaconfparser).\r\n\r\nWith Napalm v2.0.0 no required module is installed with `pip3 install napalm`, so napalm won't work.\n", "before_files": [{"content": "\"\"\"setup.py file.\"\"\"\nimport uuid\nimport os\n\nfrom distutils.core import Command\nfrom setuptools import setup, find_packages\nfrom setuptools.command import install\n\n\nfrom pip.req import parse_requirements\n\nimport pip\nimport sys\n\n__author__ = 'David Barroso <[email protected]>'\n\n# Read SUPPORTED_DRIVERS from file (without importing)\n_locals = {}\nfilename = os.path.join('napalm', '_SUPPORTED_DRIVERS.py')\nwith open(filename) as supported:\n exec(supported.read(), None, _locals)\n SUPPORTED_DRIVERS = _locals['SUPPORTED_DRIVERS']\n\n\ndef process_requirements(dep):\n print(\"PROCESSING DEPENDENCIES FOR {}\".format(dep))\n u = uuid.uuid1()\n iter_reqs = parse_requirements(\"requirements/{}\".format(dep), session=u)\n [pip.main(['install', (str(ir.req))]) for ir in iter_reqs]\n\n\ndef custom_command_driver(driver):\n class CustomCommand(Command):\n \"\"\"A custom command to run Pylint on all Python source files.\"\"\"\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n \"\"\"Run command.\"\"\"\n process_requirements(driver)\n\n return CustomCommand\n\n\nclass CustomInstall(install.install):\n \"\"\"A custom command to run Pylint on all Python source files.\"\"\"\n\n def run(self):\n \"\"\"Run command.\"\"\"\n if any([d in sys.argv for d in SUPPORTED_DRIVERS]):\n process_requirements('base')\n else:\n process_requirements('all')\n install.install.run(self)\n\n\ncustom_commands = {d: custom_command_driver(d) for d in SUPPORTED_DRIVERS}\ncustom_commands['install'] = CustomInstall\n\nsetup(\n cmdclass=custom_commands,\n name=\"napalm\",\n version='2.0.0',\n packages=find_packages(exclude=(\"test*\", )),\n test_suite='test_base',\n author=\"David Barroso, Kirk Byers, Mircea Ulinic\",\n author_email=\"[email protected], [email protected], [email protected]\",\n description=\"Network Automation and Programmability Abstraction Layer with Multivendor support\",\n classifiers=[\n 'Topic :: Utilities',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 
'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: MacOS',\n ],\n url=\"https://github.com/napalm-automation/napalm\",\n include_package_data=True,\n install_requires=[],\n entry_points={\n 'console_scripts': [\n 'cl_napalm_configure=napalm.base.clitools.cl_napalm_configure:main',\n 'cl_napalm_test=napalm.base.clitools.cl_napalm_test:main',\n 'cl_napalm_validate=napalm.base.clitools.cl_napalm_validate:main',\n 'napalm=napalm.base.clitools.cl_napalm:main',\n ],\n }\n)\n", "path": "setup.py"}]} | 1,639 | 216 |
gh_patches_debug_34856 | rasdani/github-patches | git_diff | streamlit__streamlit-1903 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Replace Exception in st.foo() with StreamlitAPIException
Main places we do this today:
graphviz_chart.py
map.py
</issue>
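The change itself is mechanical: wherever these modules raise a bare `Exception` for bad user input, raise `StreamlitAPIException` from `streamlit.errors` instead, so the failure is reported as a misuse of the Streamlit API rather than as a generic error. A minimal sketch of the pattern (the helper name here is invented for illustration):

```
from streamlit.errors import StreamlitAPIException


def _latitude_column(data) -> str:
    """Return the name of the latitude column, or raise a Streamlit API error."""
    if "lat" in data:
        return "lat"
    if "latitude" in data:
        return "latitude"
    raise StreamlitAPIException(
        'Map data must contain a column named "latitude" or "lat".'
    )
```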
<code>
[start of lib/streamlit/elements/graphviz_chart.py]
1 # Copyright 2018-2020 Streamlit Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Streamlit support for GraphViz charts."""
16
17 from streamlit import type_util
18 from streamlit.logger import get_logger
19 from streamlit.proto.GraphVizChart_pb2 import GraphVizChart as GraphVizChartProto
20
21 LOGGER = get_logger(__name__)
22
23
24 class GraphvizMixin:
25 def graphviz_chart(dg, figure_or_dot, width=0, height=0, use_container_width=False):
26 """Display a graph using the dagre-d3 library.
27
28 Parameters
29 ----------
30 figure_or_dot : graphviz.dot.Graph, graphviz.dot.Digraph, str
31 The Graphlib graph object or dot string to display
32
33 width : number
34 Deprecated. If != 0 (default), will show an alert.
35 From now on you should set the width directly in the Graphviz
36 spec. Please refer to the Graphviz documentation for details.
37
38 height : number
39 Deprecated. If != 0 (default), will show an alert.
40 From now on you should set the height directly in the Graphviz
41 spec. Please refer to the Graphviz documentation for details.
42
43 use_container_width : bool
44 If True, set the chart width to the column width. This takes
45 precedence over the figure's native `width` value.
46
47 Example
48 -------
49
50 >>> import streamlit as st
51 >>> import graphviz as graphviz
52 >>>
53 >>> # Create a graphlib graph object
54 >>> graph = graphviz.Digraph()
55 >>> graph.edge('run', 'intr')
56 >>> graph.edge('intr', 'runbl')
57 >>> graph.edge('runbl', 'run')
58 >>> graph.edge('run', 'kernel')
59 >>> graph.edge('kernel', 'zombie')
60 >>> graph.edge('kernel', 'sleep')
61 >>> graph.edge('kernel', 'runmem')
62 >>> graph.edge('sleep', 'swap')
63 >>> graph.edge('swap', 'runswap')
64 >>> graph.edge('runswap', 'new')
65 >>> graph.edge('runswap', 'runmem')
66 >>> graph.edge('new', 'runmem')
67 >>> graph.edge('sleep', 'runmem')
68 >>>
69 >>> st.graphviz_chart(graph)
70
71 Or you can render the chart from the graph using GraphViz's Dot
72 language:
73
74 >>> st.graphviz_chart('''
75 digraph {
76 run -> intr
77 intr -> runbl
78 runbl -> run
79 run -> kernel
80 kernel -> zombie
81 kernel -> sleep
82 kernel -> runmem
83 sleep -> swap
84 swap -> runswap
85 runswap -> new
86 runswap -> runmem
87 new -> runmem
88 sleep -> runmem
89 }
90 ''')
91
92 .. output::
93 https://share.streamlit.io/0.56.0-xTAd/index.html?id=GBn3GXZie5K1kXuBKe4yQL
94 height: 400px
95
96 """
97 if width != 0 and height != 0:
98 import streamlit as st
99
100 st.warning(
101 "The `width` and `height` arguments in `st.graphviz` are deprecated and will be removed on 2020-03-04"
102 )
103 elif width != 0:
104 import streamlit as st
105
106 st.warning(
107 "The `width` argument in `st.graphviz` is deprecated and will be removed on 2020-03-04"
108 )
109 elif height != 0:
110 import streamlit as st
111
112 st.warning(
113 "The `height` argument in `st.graphviz` is deprecated and will be removed on 2020-03-04"
114 )
115
116 graphviz_chart_proto = GraphVizChartProto()
117 marshall(graphviz_chart_proto, figure_or_dot, use_container_width)
118 return dg._enqueue("graphviz_chart", graphviz_chart_proto) # type: ignore
119
120
121 def marshall(proto, figure_or_dot, use_container_width):
122 """Construct a GraphViz chart object.
123
124 See DeltaGenerator.graphviz_chart for docs.
125 """
126
127 if type_util.is_graphviz_chart(figure_or_dot):
128 dot = figure_or_dot.source
129 elif isinstance(figure_or_dot, str):
130 dot = figure_or_dot
131 else:
132 raise Exception("Unhandled type for graphviz chart: %s" % type(figure_or_dot))
133
134 proto.spec = dot
135 proto.use_container_width = use_container_width
136
[end of lib/streamlit/elements/graphviz_chart.py]
[start of lib/streamlit/elements/map.py]
1 # Copyright 2018-2020 Streamlit Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """A wrapper for simple PyDeck scatter charts."""
16
17 import copy
18 import json
19 from typing import Any, Dict
20
21 import pandas as pd
22
23 from streamlit.proto.DeckGlJsonChart_pb2 import DeckGlJsonChart as DeckGlJsonChartProto
24 import streamlit.elements.deck_gl_json_chart as deck_gl_json_chart
25
26
27 class MapMixin:
28 def map(dg, data=None, zoom=None, use_container_width=True):
29 """Display a map with points on it.
30
31 This is a wrapper around st.pydeck_chart to quickly create scatterplot
32 charts on top of a map, with auto-centering and auto-zoom.
33
34 When using this command, we advise all users to use a personal Mapbox
35 token. This ensures the map tiles used in this chart are more
36 robust. You can do this with the mapbox.token config option.
37
38 To get a token for yourself, create an account at
39 https://mapbox.com. It's free! (for moderate usage levels) See
40 https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more
41 info on how to set config options.
42
43 Parameters
44 ----------
45 data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict,
46 or None
47 The data to be plotted. Must have columns called 'lat', 'lon',
48 'latitude', or 'longitude'.
49 zoom : int
50 Zoom level as specified in
51 https://wiki.openstreetmap.org/wiki/Zoom_levels
52
53 Example
54 -------
55 >>> import pandas as pd
56 >>> import numpy as np
57 >>>
58 >>> df = pd.DataFrame(
59 ... np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4],
60 ... columns=['lat', 'lon'])
61 >>>
62 >>> st.map(df)
63
64 .. output::
65 https://share.streamlit.io/0.53.0-SULT/index.html?id=9gTiomqPEbvHY2huTLoQtH
66 height: 600px
67
68 """
69 map_proto = DeckGlJsonChartProto()
70 map_proto.json = to_deckgl_json(data, zoom)
71 map_proto.use_container_width = use_container_width
72 return dg._enqueue("deck_gl_json_chart", map_proto) # type: ignore
73
74
75 # Map used as the basis for st.map.
76 _DEFAULT_MAP = dict(deck_gl_json_chart.EMPTY_MAP) # type: Dict[str, Any]
77 _DEFAULT_MAP["mapStyle"] = "mapbox://styles/mapbox/light-v10"
78
79 # Other default parameters for st.map.
80 _DEFAULT_COLOR = [200, 30, 0, 160]
81 _ZOOM_LEVELS = [
82 360,
83 180,
84 90,
85 45,
86 22.5,
87 11.25,
88 5.625,
89 2.813,
90 1.406,
91 0.703,
92 0.352,
93 0.176,
94 0.088,
95 0.044,
96 0.022,
97 0.011,
98 0.005,
99 0.003,
100 0.001,
101 0.0005,
102 ]
103
104
105 def _get_zoom_level(distance):
106 """Get the zoom level for a given distance in degrees.
107
108 See https://wiki.openstreetmap.org/wiki/Zoom_levels for reference.
109
110 Parameters
111 ----------
112 distance : float
113 How many degrees of longitude should fit in the map.
114
115 Returns
116 -------
117 int
118 The zoom level, from 0 to 29.
119
120 """
121
122 for i in range(len(_ZOOM_LEVELS) - 1):
123 if _ZOOM_LEVELS[i + 1] < distance <= _ZOOM_LEVELS[i]:
124 return i
125
126
127 def to_deckgl_json(data, zoom):
128
129 if data is None or data.empty:
130 return json.dumps(_DEFAULT_MAP)
131
132 if "lat" in data:
133 lat = "lat"
134 elif "latitude" in data:
135 lat = "latitude"
136 else:
137 raise Exception('Map data must contain a column named "latitude" or "lat".')
138
139 if "lon" in data:
140 lon = "lon"
141 elif "longitude" in data:
142 lon = "longitude"
143 else:
144 raise Exception('Map data must contain a column called "longitude" or "lon".')
145
146 if data[lon].isnull().values.any() or data[lat].isnull().values.any():
147 raise Exception("Latitude and longitude data must be numeric.")
148
149 data = pd.DataFrame(data)
150
151 min_lat = data[lat].min()
152 max_lat = data[lat].max()
153 min_lon = data[lon].min()
154 max_lon = data[lon].max()
155 center_lat = (max_lat + min_lat) / 2.0
156 center_lon = (max_lon + min_lon) / 2.0
157 range_lon = abs(max_lon - min_lon)
158 range_lat = abs(max_lat - min_lat)
159
160 if zoom == None:
161 if range_lon > range_lat:
162 longitude_distance = range_lon
163 else:
164 longitude_distance = range_lat
165 zoom = _get_zoom_level(longitude_distance)
166
167 # "+1" because itertuples includes the row index.
168 lon_col_index = data.columns.get_loc(lon) + 1
169 lat_col_index = data.columns.get_loc(lat) + 1
170 final_data = []
171 for row in data.itertuples():
172 final_data.append(
173 {"lon": float(row[lon_col_index]), "lat": float(row[lat_col_index])}
174 )
175
176 default = copy.deepcopy(_DEFAULT_MAP)
177 default["initialViewState"]["latitude"] = center_lat
178 default["initialViewState"]["longitude"] = center_lon
179 default["initialViewState"]["zoom"] = zoom
180 default["layers"] = [
181 {
182 "@@type": "ScatterplotLayer",
183 "getPosition": "@@=[lon, lat]",
184 "getRadius": 10,
185 "radiusScale": 10,
186 "radiusMinPixels": 3,
187 "getFillColor": _DEFAULT_COLOR,
188 "data": final_data,
189 }
190 ]
191 return json.dumps(default)
192
[end of lib/streamlit/elements/map.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lib/streamlit/elements/graphviz_chart.py b/lib/streamlit/elements/graphviz_chart.py
--- a/lib/streamlit/elements/graphviz_chart.py
+++ b/lib/streamlit/elements/graphviz_chart.py
@@ -17,6 +17,7 @@
from streamlit import type_util
from streamlit.logger import get_logger
from streamlit.proto.GraphVizChart_pb2 import GraphVizChart as GraphVizChartProto
+from streamlit.errors import StreamlitAPIException
LOGGER = get_logger(__name__)
@@ -129,7 +130,9 @@
elif isinstance(figure_or_dot, str):
dot = figure_or_dot
else:
- raise Exception("Unhandled type for graphviz chart: %s" % type(figure_or_dot))
+ raise StreamlitAPIException(
+ "Unhandled type for graphviz chart: %s" % type(figure_or_dot)
+ )
proto.spec = dot
proto.use_container_width = use_container_width
diff --git a/lib/streamlit/elements/map.py b/lib/streamlit/elements/map.py
--- a/lib/streamlit/elements/map.py
+++ b/lib/streamlit/elements/map.py
@@ -22,6 +22,7 @@
from streamlit.proto.DeckGlJsonChart_pb2 import DeckGlJsonChart as DeckGlJsonChartProto
import streamlit.elements.deck_gl_json_chart as deck_gl_json_chart
+from streamlit.errors import StreamlitAPIException
class MapMixin:
@@ -134,17 +135,21 @@
elif "latitude" in data:
lat = "latitude"
else:
- raise Exception('Map data must contain a column named "latitude" or "lat".')
+ raise StreamlitAPIException(
+ 'Map data must contain a column named "latitude" or "lat".'
+ )
if "lon" in data:
lon = "lon"
elif "longitude" in data:
lon = "longitude"
else:
- raise Exception('Map data must contain a column called "longitude" or "lon".')
+ raise StreamlitAPIException(
+ 'Map data must contain a column called "longitude" or "lon".'
+ )
if data[lon].isnull().values.any() or data[lat].isnull().values.any():
- raise Exception("Latitude and longitude data must be numeric.")
+ raise StreamlitAPIException("Latitude and longitude data must be numeric.")
data = pd.DataFrame(data)
| {"golden_diff": "diff --git a/lib/streamlit/elements/graphviz_chart.py b/lib/streamlit/elements/graphviz_chart.py\n--- a/lib/streamlit/elements/graphviz_chart.py\n+++ b/lib/streamlit/elements/graphviz_chart.py\n@@ -17,6 +17,7 @@\n from streamlit import type_util\n from streamlit.logger import get_logger\n from streamlit.proto.GraphVizChart_pb2 import GraphVizChart as GraphVizChartProto\n+from streamlit.errors import StreamlitAPIException\n \n LOGGER = get_logger(__name__)\n \n@@ -129,7 +130,9 @@\n elif isinstance(figure_or_dot, str):\n dot = figure_or_dot\n else:\n- raise Exception(\"Unhandled type for graphviz chart: %s\" % type(figure_or_dot))\n+ raise StreamlitAPIException(\n+ \"Unhandled type for graphviz chart: %s\" % type(figure_or_dot)\n+ )\n \n proto.spec = dot\n proto.use_container_width = use_container_width\ndiff --git a/lib/streamlit/elements/map.py b/lib/streamlit/elements/map.py\n--- a/lib/streamlit/elements/map.py\n+++ b/lib/streamlit/elements/map.py\n@@ -22,6 +22,7 @@\n \n from streamlit.proto.DeckGlJsonChart_pb2 import DeckGlJsonChart as DeckGlJsonChartProto\n import streamlit.elements.deck_gl_json_chart as deck_gl_json_chart\n+from streamlit.errors import StreamlitAPIException\n \n \n class MapMixin:\n@@ -134,17 +135,21 @@\n elif \"latitude\" in data:\n lat = \"latitude\"\n else:\n- raise Exception('Map data must contain a column named \"latitude\" or \"lat\".')\n+ raise StreamlitAPIException(\n+ 'Map data must contain a column named \"latitude\" or \"lat\".'\n+ )\n \n if \"lon\" in data:\n lon = \"lon\"\n elif \"longitude\" in data:\n lon = \"longitude\"\n else:\n- raise Exception('Map data must contain a column called \"longitude\" or \"lon\".')\n+ raise StreamlitAPIException(\n+ 'Map data must contain a column called \"longitude\" or \"lon\".'\n+ )\n \n if data[lon].isnull().values.any() or data[lat].isnull().values.any():\n- raise Exception(\"Latitude and longitude data must be numeric.\")\n+ raise StreamlitAPIException(\"Latitude and longitude data must be numeric.\")\n \n data = pd.DataFrame(data)\n", "issue": "Replace Exception in st.foo() with StreamlitAPIException\nMain places we do this today:\r\ngraphviz_chart.py\r\nmap.py\n", "before_files": [{"content": "# Copyright 2018-2020 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Streamlit support for GraphViz charts.\"\"\"\n\nfrom streamlit import type_util\nfrom streamlit.logger import get_logger\nfrom streamlit.proto.GraphVizChart_pb2 import GraphVizChart as GraphVizChartProto\n\nLOGGER = get_logger(__name__)\n\n\nclass GraphvizMixin:\n def graphviz_chart(dg, figure_or_dot, width=0, height=0, use_container_width=False):\n \"\"\"Display a graph using the dagre-d3 library.\n\n Parameters\n ----------\n figure_or_dot : graphviz.dot.Graph, graphviz.dot.Digraph, str\n The Graphlib graph object or dot string to display\n\n width : number\n Deprecated. If != 0 (default), will show an alert.\n From now on you should set the width directly in the Graphviz\n spec. 
Please refer to the Graphviz documentation for details.\n\n height : number\n Deprecated. If != 0 (default), will show an alert.\n From now on you should set the height directly in the Graphviz\n spec. Please refer to the Graphviz documentation for details.\n\n use_container_width : bool\n If True, set the chart width to the column width. This takes\n precedence over the figure's native `width` value.\n\n Example\n -------\n\n >>> import streamlit as st\n >>> import graphviz as graphviz\n >>>\n >>> # Create a graphlib graph object\n >>> graph = graphviz.Digraph()\n >>> graph.edge('run', 'intr')\n >>> graph.edge('intr', 'runbl')\n >>> graph.edge('runbl', 'run')\n >>> graph.edge('run', 'kernel')\n >>> graph.edge('kernel', 'zombie')\n >>> graph.edge('kernel', 'sleep')\n >>> graph.edge('kernel', 'runmem')\n >>> graph.edge('sleep', 'swap')\n >>> graph.edge('swap', 'runswap')\n >>> graph.edge('runswap', 'new')\n >>> graph.edge('runswap', 'runmem')\n >>> graph.edge('new', 'runmem')\n >>> graph.edge('sleep', 'runmem')\n >>>\n >>> st.graphviz_chart(graph)\n\n Or you can render the chart from the graph using GraphViz's Dot\n language:\n\n >>> st.graphviz_chart('''\n digraph {\n run -> intr\n intr -> runbl\n runbl -> run\n run -> kernel\n kernel -> zombie\n kernel -> sleep\n kernel -> runmem\n sleep -> swap\n swap -> runswap\n runswap -> new\n runswap -> runmem\n new -> runmem\n sleep -> runmem\n }\n ''')\n\n .. output::\n https://share.streamlit.io/0.56.0-xTAd/index.html?id=GBn3GXZie5K1kXuBKe4yQL\n height: 400px\n\n \"\"\"\n if width != 0 and height != 0:\n import streamlit as st\n\n st.warning(\n \"The `width` and `height` arguments in `st.graphviz` are deprecated and will be removed on 2020-03-04\"\n )\n elif width != 0:\n import streamlit as st\n\n st.warning(\n \"The `width` argument in `st.graphviz` is deprecated and will be removed on 2020-03-04\"\n )\n elif height != 0:\n import streamlit as st\n\n st.warning(\n \"The `height` argument in `st.graphviz` is deprecated and will be removed on 2020-03-04\"\n )\n\n graphviz_chart_proto = GraphVizChartProto()\n marshall(graphviz_chart_proto, figure_or_dot, use_container_width)\n return dg._enqueue(\"graphviz_chart\", graphviz_chart_proto) # type: ignore\n\n\ndef marshall(proto, figure_or_dot, use_container_width):\n \"\"\"Construct a GraphViz chart object.\n\n See DeltaGenerator.graphviz_chart for docs.\n \"\"\"\n\n if type_util.is_graphviz_chart(figure_or_dot):\n dot = figure_or_dot.source\n elif isinstance(figure_or_dot, str):\n dot = figure_or_dot\n else:\n raise Exception(\"Unhandled type for graphviz chart: %s\" % type(figure_or_dot))\n\n proto.spec = dot\n proto.use_container_width = use_container_width\n", "path": "lib/streamlit/elements/graphviz_chart.py"}, {"content": "# Copyright 2018-2020 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A wrapper for simple PyDeck scatter charts.\"\"\"\n\nimport copy\nimport json\nfrom typing import Any, Dict\n\nimport pandas as pd\n\nfrom streamlit.proto.DeckGlJsonChart_pb2 import 
DeckGlJsonChart as DeckGlJsonChartProto\nimport streamlit.elements.deck_gl_json_chart as deck_gl_json_chart\n\n\nclass MapMixin:\n def map(dg, data=None, zoom=None, use_container_width=True):\n \"\"\"Display a map with points on it.\n\n This is a wrapper around st.pydeck_chart to quickly create scatterplot\n charts on top of a map, with auto-centering and auto-zoom.\n\n When using this command, we advise all users to use a personal Mapbox\n token. This ensures the map tiles used in this chart are more\n robust. You can do this with the mapbox.token config option.\n\n To get a token for yourself, create an account at\n https://mapbox.com. It's free! (for moderate usage levels) See\n https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more\n info on how to set config options.\n\n Parameters\n ----------\n data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict,\n or None\n The data to be plotted. Must have columns called 'lat', 'lon',\n 'latitude', or 'longitude'.\n zoom : int\n Zoom level as specified in\n https://wiki.openstreetmap.org/wiki/Zoom_levels\n\n Example\n -------\n >>> import pandas as pd\n >>> import numpy as np\n >>>\n >>> df = pd.DataFrame(\n ... np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4],\n ... columns=['lat', 'lon'])\n >>>\n >>> st.map(df)\n\n .. output::\n https://share.streamlit.io/0.53.0-SULT/index.html?id=9gTiomqPEbvHY2huTLoQtH\n height: 600px\n\n \"\"\"\n map_proto = DeckGlJsonChartProto()\n map_proto.json = to_deckgl_json(data, zoom)\n map_proto.use_container_width = use_container_width\n return dg._enqueue(\"deck_gl_json_chart\", map_proto) # type: ignore\n\n\n# Map used as the basis for st.map.\n_DEFAULT_MAP = dict(deck_gl_json_chart.EMPTY_MAP) # type: Dict[str, Any]\n_DEFAULT_MAP[\"mapStyle\"] = \"mapbox://styles/mapbox/light-v10\"\n\n# Other default parameters for st.map.\n_DEFAULT_COLOR = [200, 30, 0, 160]\n_ZOOM_LEVELS = [\n 360,\n 180,\n 90,\n 45,\n 22.5,\n 11.25,\n 5.625,\n 2.813,\n 1.406,\n 0.703,\n 0.352,\n 0.176,\n 0.088,\n 0.044,\n 0.022,\n 0.011,\n 0.005,\n 0.003,\n 0.001,\n 0.0005,\n]\n\n\ndef _get_zoom_level(distance):\n \"\"\"Get the zoom level for a given distance in degrees.\n\n See https://wiki.openstreetmap.org/wiki/Zoom_levels for reference.\n\n Parameters\n ----------\n distance : float\n How many degrees of longitude should fit in the map.\n\n Returns\n -------\n int\n The zoom level, from 0 to 29.\n\n \"\"\"\n\n for i in range(len(_ZOOM_LEVELS) - 1):\n if _ZOOM_LEVELS[i + 1] < distance <= _ZOOM_LEVELS[i]:\n return i\n\n\ndef to_deckgl_json(data, zoom):\n\n if data is None or data.empty:\n return json.dumps(_DEFAULT_MAP)\n\n if \"lat\" in data:\n lat = \"lat\"\n elif \"latitude\" in data:\n lat = \"latitude\"\n else:\n raise Exception('Map data must contain a column named \"latitude\" or \"lat\".')\n\n if \"lon\" in data:\n lon = \"lon\"\n elif \"longitude\" in data:\n lon = \"longitude\"\n else:\n raise Exception('Map data must contain a column called \"longitude\" or \"lon\".')\n\n if data[lon].isnull().values.any() or data[lat].isnull().values.any():\n raise Exception(\"Latitude and longitude data must be numeric.\")\n\n data = pd.DataFrame(data)\n\n min_lat = data[lat].min()\n max_lat = data[lat].max()\n min_lon = data[lon].min()\n max_lon = data[lon].max()\n center_lat = (max_lat + min_lat) / 2.0\n center_lon = (max_lon + min_lon) / 2.0\n range_lon = abs(max_lon - min_lon)\n range_lat = abs(max_lat - min_lat)\n\n if zoom == None:\n if range_lon > range_lat:\n longitude_distance = range_lon\n 
else:\n longitude_distance = range_lat\n zoom = _get_zoom_level(longitude_distance)\n\n # \"+1\" because itertuples includes the row index.\n lon_col_index = data.columns.get_loc(lon) + 1\n lat_col_index = data.columns.get_loc(lat) + 1\n final_data = []\n for row in data.itertuples():\n final_data.append(\n {\"lon\": float(row[lon_col_index]), \"lat\": float(row[lat_col_index])}\n )\n\n default = copy.deepcopy(_DEFAULT_MAP)\n default[\"initialViewState\"][\"latitude\"] = center_lat\n default[\"initialViewState\"][\"longitude\"] = center_lon\n default[\"initialViewState\"][\"zoom\"] = zoom\n default[\"layers\"] = [\n {\n \"@@type\": \"ScatterplotLayer\",\n \"getPosition\": \"@@=[lon, lat]\",\n \"getRadius\": 10,\n \"radiusScale\": 10,\n \"radiusMinPixels\": 3,\n \"getFillColor\": _DEFAULT_COLOR,\n \"data\": final_data,\n }\n ]\n return json.dumps(default)\n", "path": "lib/streamlit/elements/map.py"}]} | 4,025 | 550 |
gh_patches_debug_7728 | rasdani/github-patches | git_diff | comic__grand-challenge.org-838 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove unused CIRRUS environment variables
These variables are now deprecated.
https://github.com/comic/grand-challenge.org/blob/30875fd388f2ad14212cf57c0caa8b9efcba19d9/app/grandchallenge/workstations/models.py#L201
</issue>
<code>
[start of app/grandchallenge/workstations/models.py]
1 from datetime import timedelta, datetime
2 from urllib.parse import unquote, urljoin
3
4 from django.conf import settings
5 from django.core.validators import MaxValueValidator, RegexValidator
6 from django.db import models
7 from django_extensions.db.models import TitleSlugDescriptionModel
8 from rest_framework.authtoken.models import Token
9 from simple_history.models import HistoricalRecords
10
11 from grandchallenge.challenges.models import get_logo_path
12 from grandchallenge.container_exec.backends.docker import Service
13 from grandchallenge.container_exec.models import ContainerImageModel
14 from grandchallenge.container_exec.tasks import start_service, stop_service
15 from grandchallenge.core.models import UUIDModel
16 from grandchallenge.subdomains.utils import reverse
17
18 __doc__ = """
19 Workstations are used to view, annotate and upload images to grand challenge.
20 A `workstation admin` is able to upload a ``WorkstationImage``, which is a docker container image.
21 A ``WorkstationImage`` expose a http and, optionally, a websocket port.
22 A `workstation user` can then launch a workstation ``Session`` for a particular ``WorkstationImage``.
23
24 When a new session is started, a new container instance of the selected ``WorkstationImage`` is lauched on the docker host.
25 The connection to the container will be proxied, and only accessible to the user that created the session.
26 The proxy will map the http and websocket connections from the user to the running instance, which is mapped by the container hostname.
27 The container instance will have the users API token set in the environment, so that it is able to interact with the grand challenge API as this user.
28 The user is able to stop the container, otherwise it will be terminated after ``maxmium_duration`` is reached.
29 """
30
31
32 class Workstation(UUIDModel, TitleSlugDescriptionModel):
33 """ This model holds the title and description of a workstation. """
34
35 logo = models.ImageField(upload_to=get_logo_path)
36
37 @property
38 def latest_ready_image(self):
39 """
40 Returns
41 -------
42 The most recent container image for this workstation
43 """
44 return (
45 self.workstationimage_set.filter(ready=True)
46 .order_by("-created")
47 .first()
48 )
49
50 def __str__(self):
51 return f"Workstation {self.title}"
52
53 def get_absolute_url(self):
54 return reverse("workstations:detail", kwargs={"slug": self.slug})
55
56
57 class WorkstationImage(UUIDModel, ContainerImageModel):
58 """
59 A ``WorkstationImage`` is a docker container image of a workstation.
60
61 Parameters
62 ----------
63 workstation
64 A ``Workstation`` can have multiple ``WorkstationImage``, that
65 represent different versions of a workstation
66 http_port
67 This container will expose a http server on this port
68 websocket_port
69 This container will expose a websocket on this port. Any relative url
70 that starts with ``/mlab4d4c4142`` will be proxied to this port.
71 initial_path
72 The initial path that users will navigate to in order to load the
73 workstation
74 """
75
76 workstation = models.ForeignKey(Workstation, on_delete=models.CASCADE)
77 http_port = models.PositiveIntegerField(
78 default=8080, validators=[MaxValueValidator(2 ** 16 - 1)]
79 )
80 websocket_port = models.PositiveIntegerField(
81 default=4114, validators=[MaxValueValidator(2 ** 16 - 1)]
82 )
83 initial_path = models.CharField(
84 max_length=256,
85 default="Applications/GrandChallengeViewer/index.html",
86 blank=True,
87 validators=[
88 RegexValidator(
89 regex=r"^(?:[^/][^\s]*)\Z",
90 message="This path is invalid, it must not start with a /",
91 )
92 ],
93 )
94
95 def __str__(self):
96 return f"Workstation Image {self.pk}"
97
98 def get_absolute_url(self):
99 return reverse(
100 "workstations:image-detail",
101 kwargs={"slug": self.workstation.slug, "pk": self.pk},
102 )
103
104
105 class Session(UUIDModel):
106 """
107 Tracks who has launched workstation images. The ``WorkstationImage`` will
108 be launched as a ``Service``. The ``Session`` is responsible for starting
109 and stopping the ``Service``.
110
111 Parameters
112 ----------
113
114 status
115 Stores what has happened with the service, is it running, errored, etc?
116 creator
117 Who created the session? This is also the only user that should be able
118 to access the launched service.
119 workstation_image
120 The container image that will be launched by this ``Session``.
121 maximum_duration
122 The maximum time that the service can be active before it is terminated
123 user_finished
124 Indicates if the user has chosen to end the session early
125 history
126 The history of this Session
127 """
128
129 QUEUED = 0
130 STARTED = 1
131 RUNNING = 2
132 FAILED = 3
133 STOPPED = 4
134
135 # These should match the values in session.js
136 STATUS_CHOICES = (
137 (QUEUED, "Queued"),
138 (STARTED, "Started"),
139 (RUNNING, "Running"),
140 (FAILED, "Failed"),
141 (STOPPED, "Stopped"),
142 )
143
144 status = models.PositiveSmallIntegerField(
145 choices=STATUS_CHOICES, default=QUEUED
146 )
147 creator = models.ForeignKey(
148 settings.AUTH_USER_MODEL, null=True, on_delete=models.SET_NULL
149 )
150 workstation_image = models.ForeignKey(
151 WorkstationImage, on_delete=models.CASCADE
152 )
153 maximum_duration = models.DurationField(default=timedelta(minutes=10))
154 user_finished = models.BooleanField(default=False)
155 history = HistoricalRecords()
156
157 def __str__(self):
158 return f"Session {self.pk}"
159
160 @property
161 def task_kwargs(self) -> dict:
162 """
163 Returns
164 -------
165 The kwargs that need to be passed to celery to get this object
166 """
167 return {
168 "app_label": self._meta.app_label,
169 "model_name": self._meta.model_name,
170 "pk": self.pk,
171 }
172
173 @property
174 def hostname(self) -> str:
175 """
176 Returns
177 -------
178 The unique hostname for this session
179 """
180 return (
181 f"{self.pk}.{self._meta.model_name}.{self._meta.app_label}".lower()
182 )
183
184 @property
185 def expires_at(self) -> datetime:
186 """
187 Returns
188 -------
189 The time when this session expires.
190 """
191 return self.created + self.maximum_duration
192
193 @property
194 def environment(self) -> dict:
195 """
196 Returns
197 -------
198 The environment variables that should be set on the container.
199 """
200 env = {
201 "GRAND_CHALLENGE_PROXY_URL_MAPPINGS": "",
202 "GRAND_CHALLENGE_QUERY_IMAGE_URL": unquote(
203 reverse("api:image-detail", kwargs={"pk": "{key}"})
204 ),
205 }
206
207 if self.creator:
208 env.update(
209 {
210 "GRAND_CHALLENGE_AUTHORIZATION": f"TOKEN {Token.objects.get_or_create(user=self.creator)[0].key}"
211 }
212 )
213
214 if settings.DEBUG:
215 # Allow the container to communicate with the dev environment
216 env.update({"GRAND_CHALLENGE_UNSAFE": "True"})
217
218 return env
219
220 @property
221 def service(self) -> Service:
222 """
223 Returns
224 -------
225 The service for this session, could be active or inactive.
226 """
227 return Service(
228 job_id=self.pk,
229 job_model=f"{self._meta.app_label}-{self._meta.model_name}",
230 exec_image=self.workstation_image.image,
231 exec_image_sha256=self.workstation_image.image_sha256,
232 )
233
234 @property
235 def workstation_url(self) -> str:
236 """
237 Returns
238 -------
239 The url that users will use to access the workstation instance.
240 """
241 return urljoin(
242 self.get_absolute_url(), self.workstation_image.initial_path
243 )
244
245 def start(self) -> None:
246 """
247 Starts the service for this session, ensuring that the
248 ``workstation_image`` is ready to be used and that
249 ``WORKSTATIONS_MAXIMUM_SESSIONS`` has not been reached.
250
251 Raises
252 ------
253 RunTimeError
254 If the service cannot be started.
255
256 Returns
257 -------
258 """
259 try:
260 if not self.workstation_image.ready:
261 raise RuntimeError("Workstation image was not ready")
262
263 if (
264 Session.objects.all()
265 .filter(status__in=[Session.RUNNING, Session.STARTED])
266 .count()
267 >= settings.WORKSTATIONS_MAXIMUM_SESSIONS
268 ):
269 raise RuntimeError("Too many sessions are running")
270
271 self.service.start(
272 http_port=self.workstation_image.http_port,
273 websocket_port=self.workstation_image.websocket_port,
274 hostname=self.hostname,
275 environment=self.environment,
276 )
277 self.update_status(status=self.STARTED)
278 except RuntimeError:
279 self.update_status(status=self.FAILED)
280
281 def stop(self) -> None:
282 """
283 Stops the service for this session, cleaning up all of the containers.
284
285 Returns
286 -------
287 """
288 self.service.stop_and_cleanup()
289 self.update_status(status=self.STOPPED)
290
291 def update_status(self, *, status: STATUS_CHOICES) -> None:
292 """
293 Updates the status of this session.
294
295 Parameters
296 ----------
297 status
298 The new status for this session.
299
300 Returns
301 -------
302 """
303 self.status = status
304 self.save()
305
306 def get_absolute_url(self):
307 return reverse(
308 "workstations:session-detail",
309 kwargs={
310 "slug": self.workstation_image.workstation.slug,
311 "pk": self.pk,
312 },
313 )
314
315 def save(self, *args, **kwargs) -> None:
316 """
317 Saves the session instance, starting or stopping the service if needed.
318
319 Returns
320 -------
321 """
322 created = self._state.adding
323
324 super().save(*args, **kwargs)
325
326 if created:
327 start_service.apply_async(kwargs=self.task_kwargs)
328 elif self.user_finished and self.status != self.STOPPED:
329 stop_service.apply_async(kwargs=self.task_kwargs)
330
[end of app/grandchallenge/workstations/models.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/grandchallenge/workstations/models.py b/app/grandchallenge/workstations/models.py
--- a/app/grandchallenge/workstations/models.py
+++ b/app/grandchallenge/workstations/models.py
@@ -197,12 +197,7 @@
-------
The environment variables that should be set on the container.
"""
- env = {
- "GRAND_CHALLENGE_PROXY_URL_MAPPINGS": "",
- "GRAND_CHALLENGE_QUERY_IMAGE_URL": unquote(
- reverse("api:image-detail", kwargs={"pk": "{key}"})
- ),
- }
+ env = {"GRAND_CHALLENGE_API_ROOT": unquote(reverse("api:api-root"))}
if self.creator:
env.update(
| {"golden_diff": "diff --git a/app/grandchallenge/workstations/models.py b/app/grandchallenge/workstations/models.py\n--- a/app/grandchallenge/workstations/models.py\n+++ b/app/grandchallenge/workstations/models.py\n@@ -197,12 +197,7 @@\n -------\n The environment variables that should be set on the container.\n \"\"\"\n- env = {\n- \"GRAND_CHALLENGE_PROXY_URL_MAPPINGS\": \"\",\n- \"GRAND_CHALLENGE_QUERY_IMAGE_URL\": unquote(\n- reverse(\"api:image-detail\", kwargs={\"pk\": \"{key}\"})\n- ),\n- }\n+ env = {\"GRAND_CHALLENGE_API_ROOT\": unquote(reverse(\"api:api-root\"))}\n \n if self.creator:\n env.update(\n", "issue": "Remove unused CIRRUS environment variables\nThese variables are now deprecated.\r\n\r\nhttps://github.com/comic/grand-challenge.org/blob/30875fd388f2ad14212cf57c0caa8b9efcba19d9/app/grandchallenge/workstations/models.py#L201\n", "before_files": [{"content": "from datetime import timedelta, datetime\nfrom urllib.parse import unquote, urljoin\n\nfrom django.conf import settings\nfrom django.core.validators import MaxValueValidator, RegexValidator\nfrom django.db import models\nfrom django_extensions.db.models import TitleSlugDescriptionModel\nfrom rest_framework.authtoken.models import Token\nfrom simple_history.models import HistoricalRecords\n\nfrom grandchallenge.challenges.models import get_logo_path\nfrom grandchallenge.container_exec.backends.docker import Service\nfrom grandchallenge.container_exec.models import ContainerImageModel\nfrom grandchallenge.container_exec.tasks import start_service, stop_service\nfrom grandchallenge.core.models import UUIDModel\nfrom grandchallenge.subdomains.utils import reverse\n\n__doc__ = \"\"\"\nWorkstations are used to view, annotate and upload images to grand challenge.\nA `workstation admin` is able to upload a ``WorkstationImage``, which is a docker container image.\nA ``WorkstationImage`` expose a http and, optionally, a websocket port.\nA `workstation user` can then launch a workstation ``Session`` for a particular ``WorkstationImage``.\n\nWhen a new session is started, a new container instance of the selected ``WorkstationImage`` is lauched on the docker host.\nThe connection to the container will be proxied, and only accessible to the user that created the session.\nThe proxy will map the http and websocket connections from the user to the running instance, which is mapped by the container hostname.\nThe container instance will have the users API token set in the environment, so that it is able to interact with the grand challenge API as this user.\nThe user is able to stop the container, otherwise it will be terminated after ``maxmium_duration`` is reached.\n\"\"\"\n\n\nclass Workstation(UUIDModel, TitleSlugDescriptionModel):\n \"\"\" This model holds the title and description of a workstation. 
\"\"\"\n\n logo = models.ImageField(upload_to=get_logo_path)\n\n @property\n def latest_ready_image(self):\n \"\"\"\n Returns\n -------\n The most recent container image for this workstation\n \"\"\"\n return (\n self.workstationimage_set.filter(ready=True)\n .order_by(\"-created\")\n .first()\n )\n\n def __str__(self):\n return f\"Workstation {self.title}\"\n\n def get_absolute_url(self):\n return reverse(\"workstations:detail\", kwargs={\"slug\": self.slug})\n\n\nclass WorkstationImage(UUIDModel, ContainerImageModel):\n \"\"\"\n A ``WorkstationImage`` is a docker container image of a workstation.\n\n Parameters\n ----------\n workstation\n A ``Workstation`` can have multiple ``WorkstationImage``, that\n represent different versions of a workstation\n http_port\n This container will expose a http server on this port\n websocket_port\n This container will expose a websocket on this port. Any relative url\n that starts with ``/mlab4d4c4142`` will be proxied to this port.\n initial_path\n The initial path that users will navigate to in order to load the\n workstation\n \"\"\"\n\n workstation = models.ForeignKey(Workstation, on_delete=models.CASCADE)\n http_port = models.PositiveIntegerField(\n default=8080, validators=[MaxValueValidator(2 ** 16 - 1)]\n )\n websocket_port = models.PositiveIntegerField(\n default=4114, validators=[MaxValueValidator(2 ** 16 - 1)]\n )\n initial_path = models.CharField(\n max_length=256,\n default=\"Applications/GrandChallengeViewer/index.html\",\n blank=True,\n validators=[\n RegexValidator(\n regex=r\"^(?:[^/][^\\s]*)\\Z\",\n message=\"This path is invalid, it must not start with a /\",\n )\n ],\n )\n\n def __str__(self):\n return f\"Workstation Image {self.pk}\"\n\n def get_absolute_url(self):\n return reverse(\n \"workstations:image-detail\",\n kwargs={\"slug\": self.workstation.slug, \"pk\": self.pk},\n )\n\n\nclass Session(UUIDModel):\n \"\"\"\n Tracks who has launched workstation images. The ``WorkstationImage`` will\n be launched as a ``Service``. The ``Session`` is responsible for starting\n and stopping the ``Service``.\n\n Parameters\n ----------\n\n status\n Stores what has happened with the service, is it running, errored, etc?\n creator\n Who created the session? 
This is also the only user that should be able\n to access the launched service.\n workstation_image\n The container image that will be launched by this ``Session``.\n maximum_duration\n The maximum time that the service can be active before it is terminated\n user_finished\n Indicates if the user has chosen to end the session early\n history\n The history of this Session\n \"\"\"\n\n QUEUED = 0\n STARTED = 1\n RUNNING = 2\n FAILED = 3\n STOPPED = 4\n\n # These should match the values in session.js\n STATUS_CHOICES = (\n (QUEUED, \"Queued\"),\n (STARTED, \"Started\"),\n (RUNNING, \"Running\"),\n (FAILED, \"Failed\"),\n (STOPPED, \"Stopped\"),\n )\n\n status = models.PositiveSmallIntegerField(\n choices=STATUS_CHOICES, default=QUEUED\n )\n creator = models.ForeignKey(\n settings.AUTH_USER_MODEL, null=True, on_delete=models.SET_NULL\n )\n workstation_image = models.ForeignKey(\n WorkstationImage, on_delete=models.CASCADE\n )\n maximum_duration = models.DurationField(default=timedelta(minutes=10))\n user_finished = models.BooleanField(default=False)\n history = HistoricalRecords()\n\n def __str__(self):\n return f\"Session {self.pk}\"\n\n @property\n def task_kwargs(self) -> dict:\n \"\"\"\n Returns\n -------\n The kwargs that need to be passed to celery to get this object\n \"\"\"\n return {\n \"app_label\": self._meta.app_label,\n \"model_name\": self._meta.model_name,\n \"pk\": self.pk,\n }\n\n @property\n def hostname(self) -> str:\n \"\"\"\n Returns\n -------\n The unique hostname for this session\n \"\"\"\n return (\n f\"{self.pk}.{self._meta.model_name}.{self._meta.app_label}\".lower()\n )\n\n @property\n def expires_at(self) -> datetime:\n \"\"\"\n Returns\n -------\n The time when this session expires.\n \"\"\"\n return self.created + self.maximum_duration\n\n @property\n def environment(self) -> dict:\n \"\"\"\n Returns\n -------\n The environment variables that should be set on the container.\n \"\"\"\n env = {\n \"GRAND_CHALLENGE_PROXY_URL_MAPPINGS\": \"\",\n \"GRAND_CHALLENGE_QUERY_IMAGE_URL\": unquote(\n reverse(\"api:image-detail\", kwargs={\"pk\": \"{key}\"})\n ),\n }\n\n if self.creator:\n env.update(\n {\n \"GRAND_CHALLENGE_AUTHORIZATION\": f\"TOKEN {Token.objects.get_or_create(user=self.creator)[0].key}\"\n }\n )\n\n if settings.DEBUG:\n # Allow the container to communicate with the dev environment\n env.update({\"GRAND_CHALLENGE_UNSAFE\": \"True\"})\n\n return env\n\n @property\n def service(self) -> Service:\n \"\"\"\n Returns\n -------\n The service for this session, could be active or inactive.\n \"\"\"\n return Service(\n job_id=self.pk,\n job_model=f\"{self._meta.app_label}-{self._meta.model_name}\",\n exec_image=self.workstation_image.image,\n exec_image_sha256=self.workstation_image.image_sha256,\n )\n\n @property\n def workstation_url(self) -> str:\n \"\"\"\n Returns\n -------\n The url that users will use to access the workstation instance.\n \"\"\"\n return urljoin(\n self.get_absolute_url(), self.workstation_image.initial_path\n )\n\n def start(self) -> None:\n \"\"\"\n Starts the service for this session, ensuring that the\n ``workstation_image`` is ready to be used and that\n ``WORKSTATIONS_MAXIMUM_SESSIONS`` has not been reached.\n\n Raises\n ------\n RunTimeError\n If the service cannot be started.\n\n Returns\n -------\n \"\"\"\n try:\n if not self.workstation_image.ready:\n raise RuntimeError(\"Workstation image was not ready\")\n\n if (\n Session.objects.all()\n .filter(status__in=[Session.RUNNING, Session.STARTED])\n .count()\n >= 
settings.WORKSTATIONS_MAXIMUM_SESSIONS\n ):\n raise RuntimeError(\"Too many sessions are running\")\n\n self.service.start(\n http_port=self.workstation_image.http_port,\n websocket_port=self.workstation_image.websocket_port,\n hostname=self.hostname,\n environment=self.environment,\n )\n self.update_status(status=self.STARTED)\n except RuntimeError:\n self.update_status(status=self.FAILED)\n\n def stop(self) -> None:\n \"\"\"\n Stops the service for this session, cleaning up all of the containers.\n\n Returns\n -------\n \"\"\"\n self.service.stop_and_cleanup()\n self.update_status(status=self.STOPPED)\n\n def update_status(self, *, status: STATUS_CHOICES) -> None:\n \"\"\"\n Updates the status of this session.\n\n Parameters\n ----------\n status\n The new status for this session.\n\n Returns\n -------\n \"\"\"\n self.status = status\n self.save()\n\n def get_absolute_url(self):\n return reverse(\n \"workstations:session-detail\",\n kwargs={\n \"slug\": self.workstation_image.workstation.slug,\n \"pk\": self.pk,\n },\n )\n\n def save(self, *args, **kwargs) -> None:\n \"\"\"\n Saves the session instance, starting or stopping the service if needed.\n\n Returns\n -------\n \"\"\"\n created = self._state.adding\n\n super().save(*args, **kwargs)\n\n if created:\n start_service.apply_async(kwargs=self.task_kwargs)\n elif self.user_finished and self.status != self.STOPPED:\n stop_service.apply_async(kwargs=self.task_kwargs)\n", "path": "app/grandchallenge/workstations/models.py"}]} | 3,668 | 165 |
gh_patches_debug_66489 | rasdani/github-patches | git_diff | aio-libs__aiohttp-1752 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Encoding is always UTF-8 in POST data
## Long story short
I'm doing a `POST` request via `client.post`:
```
data = aiohttp.FormData({
'FindText': name,
}, charset='windows-1251')
client.post(base_url, params={'RowFrom': offset}, data=data)
```
where `name` contains some non-Latin text (`'хан'`)
## Expected behaviour
POST data should contain: `FindText=%D5%E0%ED`
## Actual behaviour
`FindText=%D1%85%D0%B0%D0%BD`
## Steps to reproduce
Looking through the code at `formdata.py:99`:
```
urlencode(data, doseq=True).encode(charset),
```
I noticed that `data` is URL-encoded as UTF-8 first and only then encoded to `windows-1251` (which has no effect on the already percent-encoded `%D1...`).
For now, I just do this manually in my code:
```
data = urlencode({
'FindText': name,
}, encoding='windows-1251')
```
And I get the string that I need.
Is it a bug? Or am I doing it wrong?
## Your environment
```
Python 3.6.0 (default, Jan 16 2017, 12:12:55)
[GCC 6.3.1 20170109] on linux
---
aiohttp==2.0.3
```
</issue>
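The diagnosis above matches how `urlencode` behaves: unless told otherwise it percent-encodes the UTF-8 bytes of each value, and re-encoding the resulting ASCII string afterwards cannot change those `%XX` escapes. `urlencode` does accept an `encoding` argument, so one way to honour the requested charset (a sketch, not necessarily the fix that was shipped) would be to pass it through inside `_gen_form_urlencoded`, e.g. `urlencode(data, doseq=True, encoding=charset)`. A self-contained illustration of the difference:

```
from urllib.parse import urlencode

params = {'FindText': 'хан'}

# Default behaviour: the UTF-8 bytes of the value are percent-encoded.
print(urlencode(params))  # FindText=%D1%85%D0%B0%D0%BD

# With encoding=, the windows-1251 bytes of the value are percent-encoded instead.
print(urlencode(params, encoding='windows-1251'))
```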
<code>
[start of aiohttp/formdata.py]
1 import io
2 from urllib.parse import urlencode
3
4 from multidict import MultiDict, MultiDictProxy
5
6 from . import hdrs, multipart, payload
7 from .helpers import guess_filename
8
9 __all__ = ('FormData',)
10
11
12 class FormData:
13 """Helper class for multipart/form-data and
14 application/x-www-form-urlencoded body generation."""
15
16 def __init__(self, fields=(), quote_fields=True, charset=None):
17 self._writer = multipart.MultipartWriter('form-data')
18 self._fields = []
19 self._is_multipart = False
20 self._quote_fields = quote_fields
21 self._charset = charset
22
23 if isinstance(fields, dict):
24 fields = list(fields.items())
25 elif not isinstance(fields, (list, tuple)):
26 fields = (fields,)
27 self.add_fields(*fields)
28
29 @property
30 def is_multipart(self):
31 return self._is_multipart
32
33 def add_field(self, name, value, *, content_type=None, filename=None,
34 content_transfer_encoding=None):
35
36 if isinstance(value, io.IOBase):
37 self._is_multipart = True
38 elif isinstance(value, (bytes, bytearray, memoryview)):
39 if filename is None and content_transfer_encoding is None:
40 filename = name
41
42 type_options = MultiDict({'name': name})
43 if filename is not None and not isinstance(filename, str):
44 raise TypeError('filename must be an instance of str. '
45 'Got: %s' % filename)
46 if filename is None and isinstance(value, io.IOBase):
47 filename = guess_filename(value, name)
48 if filename is not None:
49 type_options['filename'] = filename
50 self._is_multipart = True
51
52 headers = {}
53 if content_type is not None:
54 if not isinstance(content_type, str):
55 raise TypeError('content_type must be an instance of str. '
56 'Got: %s' % content_type)
57 headers[hdrs.CONTENT_TYPE] = content_type
58 self._is_multipart = True
59 if content_transfer_encoding is not None:
60 if not isinstance(content_transfer_encoding, str):
61 raise TypeError('content_transfer_encoding must be an instance'
62 ' of str. Got: %s' % content_transfer_encoding)
63 headers[hdrs.CONTENT_TRANSFER_ENCODING] = content_transfer_encoding
64 self._is_multipart = True
65
66 self._fields.append((type_options, headers, value))
67
68 def add_fields(self, *fields):
69 to_add = list(fields)
70
71 while to_add:
72 rec = to_add.pop(0)
73
74 if isinstance(rec, io.IOBase):
75 k = guess_filename(rec, 'unknown')
76 self.add_field(k, rec)
77
78 elif isinstance(rec, (MultiDictProxy, MultiDict)):
79 to_add.extend(rec.items())
80
81 elif isinstance(rec, (list, tuple)) and len(rec) == 2:
82 k, fp = rec
83 self.add_field(k, fp)
84
85 else:
86 raise TypeError('Only io.IOBase, multidict and (name, file) '
87 'pairs allowed, use .add_field() for passing '
88 'more complex parameters, got {!r}'
89 .format(rec))
90
91 def _gen_form_urlencoded(self):
92 # form data (x-www-form-urlencoded)
93 data = []
94 for type_options, _, value in self._fields:
95 data.append((type_options['name'], value))
96
97 charset = self._charset if self._charset is not None else 'utf-8'
98 return payload.BytesPayload(
99 urlencode(data, doseq=True).encode(charset),
100 content_type='application/x-www-form-urlencoded')
101
102 def _gen_form_data(self):
103 """Encode a list of fields using the multipart/form-data MIME format"""
104 for dispparams, headers, value in self._fields:
105 try:
106 if hdrs.CONTENT_TYPE in headers:
107 part = payload.get_payload(
108 value, content_type=headers[hdrs.CONTENT_TYPE],
109 headers=headers, encoding=self._charset)
110 else:
111 part = payload.get_payload(
112 value, headers=headers, encoding=self._charset)
113 except Exception as exc:
114 raise TypeError(
115 'Can not serialize value type: %r\n '
116 'headers: %r\n value: %r' % (
117 type(value), headers, value)) from exc
118
119 if dispparams:
120 part.set_content_disposition(
121 'form-data', quote_fields=self._quote_fields, **dispparams
122 )
123 # FIXME cgi.FieldStorage doesn't likes body parts with
124 # Content-Length which were sent via chunked transfer encoding
125 part.headers.pop(hdrs.CONTENT_LENGTH, None)
126
127 self._writer.append_payload(part)
128
129 return self._writer
130
131 def __call__(self):
132 if self._is_multipart:
133 return self._gen_form_data()
134 else:
135 return self._gen_form_urlencoded()
136
[end of aiohttp/formdata.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/aiohttp/formdata.py b/aiohttp/formdata.py
--- a/aiohttp/formdata.py
+++ b/aiohttp/formdata.py
@@ -96,7 +96,7 @@
charset = self._charset if self._charset is not None else 'utf-8'
return payload.BytesPayload(
- urlencode(data, doseq=True).encode(charset),
+ urlencode(data, doseq=True, encoding=charset).encode(),
content_type='application/x-www-form-urlencoded')
def _gen_form_data(self):
| {"golden_diff": "diff --git a/aiohttp/formdata.py b/aiohttp/formdata.py\n--- a/aiohttp/formdata.py\n+++ b/aiohttp/formdata.py\n@@ -96,7 +96,7 @@\n \n charset = self._charset if self._charset is not None else 'utf-8'\n return payload.BytesPayload(\n- urlencode(data, doseq=True).encode(charset),\n+ urlencode(data, doseq=True, encoding=charset).encode(),\n content_type='application/x-www-form-urlencoded')\n \n def _gen_form_data(self):\n", "issue": "Encoding is always UTF-8 in POST data\n## Long story short\r\n\r\nI'm doing a `POST` request via `client.post`:\r\n\r\n```\r\ndata = aiohttp.FormData({\r\n 'FindText': name,\r\n }, charset='windows-1251')\r\n\r\nclient.post(base_url, params={'RowFrom': offset}, data=data)\r\n```\r\n\r\nwhere `name` contains some none-latin text (`'\u0445\u0430\u043d'`)\r\n\r\n## Expected behaviour\r\n\r\nPOST data should contain: `FindText=%D5%E0%ED`\r\n\r\n## Actual behaviour\r\n\r\n`FindText=%D1%85%D0%B0%D0%BD'`\r\n\r\n## Steps to reproduce\r\n\r\nLooking through the code of `formdata.py:99`\r\n\r\n```\r\nurlencode(data, doseq=True).encode(charset),\r\n```\r\n\r\nI noticed, that `data` is urlencoded in UTF-8 first and then encoded to `windows-1251` (and that has no effect on `%D1...`).\r\n\r\nFor now, I just manually do in my code:\r\n\r\n```\r\ndata = urlencode({\r\n 'FindText': name,\r\n }, encoding='windows-1251')\r\n```\r\n\r\nAnd I get the string that I need.\r\n\r\nIs it a bug? Or am I doing it wrong?\r\n\r\n## Your environment\r\n\r\n```\r\nPython 3.6.0 (default, Jan 16 2017, 12:12:55) \r\n[GCC 6.3.1 20170109] on linux\r\n---\r\naiohttp==2.0.3\r\n```\r\n\n", "before_files": [{"content": "import io\nfrom urllib.parse import urlencode\n\nfrom multidict import MultiDict, MultiDictProxy\n\nfrom . import hdrs, multipart, payload\nfrom .helpers import guess_filename\n\n__all__ = ('FormData',)\n\n\nclass FormData:\n \"\"\"Helper class for multipart/form-data and\n application/x-www-form-urlencoded body generation.\"\"\"\n\n def __init__(self, fields=(), quote_fields=True, charset=None):\n self._writer = multipart.MultipartWriter('form-data')\n self._fields = []\n self._is_multipart = False\n self._quote_fields = quote_fields\n self._charset = charset\n\n if isinstance(fields, dict):\n fields = list(fields.items())\n elif not isinstance(fields, (list, tuple)):\n fields = (fields,)\n self.add_fields(*fields)\n\n @property\n def is_multipart(self):\n return self._is_multipart\n\n def add_field(self, name, value, *, content_type=None, filename=None,\n content_transfer_encoding=None):\n\n if isinstance(value, io.IOBase):\n self._is_multipart = True\n elif isinstance(value, (bytes, bytearray, memoryview)):\n if filename is None and content_transfer_encoding is None:\n filename = name\n\n type_options = MultiDict({'name': name})\n if filename is not None and not isinstance(filename, str):\n raise TypeError('filename must be an instance of str. '\n 'Got: %s' % filename)\n if filename is None and isinstance(value, io.IOBase):\n filename = guess_filename(value, name)\n if filename is not None:\n type_options['filename'] = filename\n self._is_multipart = True\n\n headers = {}\n if content_type is not None:\n if not isinstance(content_type, str):\n raise TypeError('content_type must be an instance of str. 
'\n 'Got: %s' % content_type)\n headers[hdrs.CONTENT_TYPE] = content_type\n self._is_multipart = True\n if content_transfer_encoding is not None:\n if not isinstance(content_transfer_encoding, str):\n raise TypeError('content_transfer_encoding must be an instance'\n ' of str. Got: %s' % content_transfer_encoding)\n headers[hdrs.CONTENT_TRANSFER_ENCODING] = content_transfer_encoding\n self._is_multipart = True\n\n self._fields.append((type_options, headers, value))\n\n def add_fields(self, *fields):\n to_add = list(fields)\n\n while to_add:\n rec = to_add.pop(0)\n\n if isinstance(rec, io.IOBase):\n k = guess_filename(rec, 'unknown')\n self.add_field(k, rec)\n\n elif isinstance(rec, (MultiDictProxy, MultiDict)):\n to_add.extend(rec.items())\n\n elif isinstance(rec, (list, tuple)) and len(rec) == 2:\n k, fp = rec\n self.add_field(k, fp)\n\n else:\n raise TypeError('Only io.IOBase, multidict and (name, file) '\n 'pairs allowed, use .add_field() for passing '\n 'more complex parameters, got {!r}'\n .format(rec))\n\n def _gen_form_urlencoded(self):\n # form data (x-www-form-urlencoded)\n data = []\n for type_options, _, value in self._fields:\n data.append((type_options['name'], value))\n\n charset = self._charset if self._charset is not None else 'utf-8'\n return payload.BytesPayload(\n urlencode(data, doseq=True).encode(charset),\n content_type='application/x-www-form-urlencoded')\n\n def _gen_form_data(self):\n \"\"\"Encode a list of fields using the multipart/form-data MIME format\"\"\"\n for dispparams, headers, value in self._fields:\n try:\n if hdrs.CONTENT_TYPE in headers:\n part = payload.get_payload(\n value, content_type=headers[hdrs.CONTENT_TYPE],\n headers=headers, encoding=self._charset)\n else:\n part = payload.get_payload(\n value, headers=headers, encoding=self._charset)\n except Exception as exc:\n raise TypeError(\n 'Can not serialize value type: %r\\n '\n 'headers: %r\\n value: %r' % (\n type(value), headers, value)) from exc\n\n if dispparams:\n part.set_content_disposition(\n 'form-data', quote_fields=self._quote_fields, **dispparams\n )\n # FIXME cgi.FieldStorage doesn't likes body parts with\n # Content-Length which were sent via chunked transfer encoding\n part.headers.pop(hdrs.CONTENT_LENGTH, None)\n\n self._writer.append_payload(part)\n\n return self._writer\n\n def __call__(self):\n if self._is_multipart:\n return self._gen_form_data()\n else:\n return self._gen_form_urlencoded()\n", "path": "aiohttp/formdata.py"}]} | 2,218 | 121 |
gh_patches_debug_31145 | rasdani/github-patches | git_diff | archlinux__archinstall-408 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
_gfx_driver_packages not defined when choosing sway option (current master build)

_gfx_driver_packages not defined when choosing sway option (current master build)

_gfx_driver_packages not defined when choosing sway option (current master build)

</issue>
<code>
[start of profiles/sway.py]
1 # A desktop environment using "Sway"
2
3 import archinstall
4
5 is_top_level_profile = False
6
7 __packages__ = ["sway", "swaylock", "swayidle", "waybar", "dmenu", "light", "grim", "slurp", "pavucontrol", "alacritty"]
8
9 def _prep_function(*args, **kwargs):
10 """
11 Magic function called by the importing installer
12 before continuing any further. It also avoids executing any
13 other code in this stage. So it's a safe way to ask the user
14 for more input before any other installer steps start.
15 """
16 if "nvidia" in _gfx_driver_packages:
17 choice = input("The proprietary Nvidia driver is not supported by Sway. It is likely that you will run into issues. Continue anyways? [y/N] ")
18 if choice.lower() in ("n", ""):
19 raise archinstall.lib.exceptions.HardwareIncompatibilityError("Sway does not support the proprietary nvidia drivers.")
20
21 __builtins__['_gfx_driver_packages'] = archinstall.select_driver()
22
23 return True
24
25 # Ensures that this code only gets executed if executed
26 # through importlib.util.spec_from_file_location("sway", "/somewhere/sway.py")
27 # or through conventional import sway
28 if __name__ == 'sway':
29 # Install the Sway packages
30 installation.add_additional_packages(__packages__)
31
[end of profiles/sway.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/profiles/sway.py b/profiles/sway.py
--- a/profiles/sway.py
+++ b/profiles/sway.py
@@ -4,7 +4,19 @@
is_top_level_profile = False
-__packages__ = ["sway", "swaylock", "swayidle", "waybar", "dmenu", "light", "grim", "slurp", "pavucontrol", "alacritty"]
+__packages__ = [
+ "sway",
+ "swaylock",
+ "swayidle",
+ "waybar",
+ "dmenu",
+ "light",
+ "grim",
+ "slurp",
+ "pavucontrol",
+ "alacritty",
+]
+
def _prep_function(*args, **kwargs):
"""
@@ -13,18 +25,26 @@
other code in this stage. So it's a safe way to ask the user
for more input before any other installer steps start.
"""
- if "nvidia" in _gfx_driver_packages:
- choice = input("The proprietary Nvidia driver is not supported by Sway. It is likely that you will run into issues. Continue anyways? [y/N] ")
- if choice.lower() in ("n", ""):
- raise archinstall.lib.exceptions.HardwareIncompatibilityError("Sway does not support the proprietary nvidia drivers.")
-
- __builtins__['_gfx_driver_packages'] = archinstall.select_driver()
+ __builtins__["_gfx_driver_packages"] = archinstall.select_driver()
return True
+
# Ensures that this code only gets executed if executed
# through importlib.util.spec_from_file_location("sway", "/somewhere/sway.py")
# or through conventional import sway
-if __name__ == 'sway':
+if __name__ == "sway":
+ if "nvidia" in _gfx_driver_packages:
+ choice = input(
+ "The proprietary Nvidia driver is not supported by Sway. It is likely that you will run into issues. Continue anyways? [y/N] "
+ )
+ if choice.lower() in ("n", ""):
+ raise archinstall.lib.exceptions.HardwareIncompatibilityError(
+ "Sway does not support the proprietary nvidia drivers."
+ )
+
# Install the Sway packages
installation.add_additional_packages(__packages__)
+
+ # Install the graphics driver packages
+ installation.add_additional_packages(_gfx_driver_packages)
| {"golden_diff": "diff --git a/profiles/sway.py b/profiles/sway.py\n--- a/profiles/sway.py\n+++ b/profiles/sway.py\n@@ -4,7 +4,19 @@\n \n is_top_level_profile = False\n \n-__packages__ = [\"sway\", \"swaylock\", \"swayidle\", \"waybar\", \"dmenu\", \"light\", \"grim\", \"slurp\", \"pavucontrol\", \"alacritty\"]\n+__packages__ = [\n+\t\"sway\",\n+\t\"swaylock\",\n+\t\"swayidle\",\n+\t\"waybar\",\n+\t\"dmenu\",\n+\t\"light\",\n+\t\"grim\",\n+\t\"slurp\",\n+\t\"pavucontrol\",\n+\t\"alacritty\",\n+]\n+\n \n def _prep_function(*args, **kwargs):\n \t\"\"\"\n@@ -13,18 +25,26 @@\n \tother code in this stage. So it's a safe way to ask the user\n \tfor more input before any other installer steps start.\n \t\"\"\"\n-\tif \"nvidia\" in _gfx_driver_packages:\n-\t\tchoice = input(\"The proprietary Nvidia driver is not supported by Sway. It is likely that you will run into issues. Continue anyways? [y/N] \")\n-\t\tif choice.lower() in (\"n\", \"\"):\n-\t\t\traise archinstall.lib.exceptions.HardwareIncompatibilityError(\"Sway does not support the proprietary nvidia drivers.\")\n-\n-\t__builtins__['_gfx_driver_packages'] = archinstall.select_driver()\n+\t__builtins__[\"_gfx_driver_packages\"] = archinstall.select_driver()\n \n \treturn True\n \n+\n # Ensures that this code only gets executed if executed\n # through importlib.util.spec_from_file_location(\"sway\", \"/somewhere/sway.py\")\n # or through conventional import sway\n-if __name__ == 'sway':\n+if __name__ == \"sway\":\n+\tif \"nvidia\" in _gfx_driver_packages:\n+\t\tchoice = input(\n+\t\t\t\"The proprietary Nvidia driver is not supported by Sway. It is likely that you will run into issues. Continue anyways? [y/N] \"\n+\t\t)\n+\t\tif choice.lower() in (\"n\", \"\"):\n+\t\t\traise archinstall.lib.exceptions.HardwareIncompatibilityError(\n+\t\t\t\t\"Sway does not support the proprietary nvidia drivers.\"\n+\t\t\t)\n+\n \t# Install the Sway packages\n \tinstallation.add_additional_packages(__packages__)\n+\n+\t# Install the graphics driver packages\n+\tinstallation.add_additional_packages(_gfx_driver_packages)\n", "issue": "_gfx_driver_packages not defined when choosing sway option (current master build)\n\r\n\n_gfx_driver_packages not defined when choosing sway option (current master build)\n\r\n\n_gfx_driver_packages not defined when choosing sway option (current master build)\n\r\n\n", "before_files": [{"content": "# A desktop environment using \"Sway\"\n\nimport archinstall\n\nis_top_level_profile = False\n\n__packages__ = [\"sway\", \"swaylock\", \"swayidle\", \"waybar\", \"dmenu\", \"light\", \"grim\", \"slurp\", \"pavucontrol\", \"alacritty\"]\n\ndef _prep_function(*args, **kwargs):\n\t\"\"\"\n\tMagic function called by the importing installer\n\tbefore continuing any further. It also avoids executing any\n\tother code in this stage. So it's a safe way to ask the user\n\tfor more input before any other installer steps start.\n\t\"\"\"\n\tif \"nvidia\" in _gfx_driver_packages:\n\t\tchoice = input(\"The proprietary Nvidia driver is not supported by Sway. It is likely that you will run into issues. Continue anyways? 
[y/N] \")\n\t\tif choice.lower() in (\"n\", \"\"):\n\t\t\traise archinstall.lib.exceptions.HardwareIncompatibilityError(\"Sway does not support the proprietary nvidia drivers.\")\n\n\t__builtins__['_gfx_driver_packages'] = archinstall.select_driver()\n\n\treturn True\n\n# Ensures that this code only gets executed if executed\n# through importlib.util.spec_from_file_location(\"sway\", \"/somewhere/sway.py\")\n# or through conventional import sway\nif __name__ == 'sway':\n\t# Install the Sway packages\n\tinstallation.add_additional_packages(__packages__)\n", "path": "profiles/sway.py"}]} | 1,203 | 558 |
gh_patches_debug_1625 | rasdani/github-patches | git_diff | Lightning-AI__pytorch-lightning-799 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Optional dependencies are required for deprecated logging module
🐛 Bug
There is a backwards compatibility issue coming from PR #767. Notably, if a user doesn't have any of the extra logging dependencies, there will be an import error.
### To Reproduce
1. Remove all logging dependencies from your environment (E.g. comet)
2. Depend on the deprecated pytorch_lightning.logging package and run
### Expected behavior
We expect to maintain backwards compatibility here so optional dependencies shouldn't be required.
</issue>
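For context, one generic way optional back-ends are usually kept optional is a guarded import. This is an illustrative sketch only, not pytorch-lightning's code, and the fix below takes a simpler route by dropping the eager submodule imports:
```python
# Illustrative pattern only; not pytorch-lightning's actual code.
try:
    import comet_ml  # optional back-end
except ImportError:
    comet_ml = None

class CometLoggerStub:
    def __init__(self, *args, **kwargs):
        if comet_ml is None:
            raise ImportError('comet_ml is not installed; it is only needed for this logger.')
```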
<code>
[start of pytorch_lightning/logging/__init__.py]
1 """
2 .. warning:: `logging` package has been renamed to `loggers` since v0.6.1 and will be removed in v0.8.0
3 """
4
5 import warnings
6
7 warnings.warn("`logging` package has been renamed to `loggers` since v0.6.1"
8 " and will be removed in v0.8.0", DeprecationWarning)
9
10 from pytorch_lightning.loggers import * # noqa: F403
11 from pytorch_lightning.loggers import ( # noqa: E402
12 base, comet, mlflow, neptune, tensorboard, test_tube, wandb
13 )
14
[end of pytorch_lightning/logging/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pytorch_lightning/logging/__init__.py b/pytorch_lightning/logging/__init__.py
--- a/pytorch_lightning/logging/__init__.py
+++ b/pytorch_lightning/logging/__init__.py
@@ -8,6 +8,3 @@
" and will be removed in v0.8.0", DeprecationWarning)
from pytorch_lightning.loggers import * # noqa: F403
-from pytorch_lightning.loggers import ( # noqa: E402
- base, comet, mlflow, neptune, tensorboard, test_tube, wandb
-)
| {"golden_diff": "diff --git a/pytorch_lightning/logging/__init__.py b/pytorch_lightning/logging/__init__.py\n--- a/pytorch_lightning/logging/__init__.py\n+++ b/pytorch_lightning/logging/__init__.py\n@@ -8,6 +8,3 @@\n \" and will be removed in v0.8.0\", DeprecationWarning)\n \n from pytorch_lightning.loggers import * # noqa: F403\n-from pytorch_lightning.loggers import ( # noqa: E402\n- base, comet, mlflow, neptune, tensorboard, test_tube, wandb\n-)\n", "issue": "Optional dependencies are required for deprecated logging module\n\ud83d\udc1b Bug\r\n\r\nThere is a backwards compatibility issues coming from PR #767. Notably, if a user doesn't have any of the extra logging dependencies then they'll be an import error.\r\n\r\n### To Reproduce\r\n\r\n1. Remove all logging dependencies from your environment (E.g. comet)\r\n2. Depend on the deprecated pytorch_lightning.logging package and run\r\n\r\n### Expected behavior\r\n\r\nWe expect to maintain backwards compatibility here so optional dependencies shouldn't be required.\n", "before_files": [{"content": "\"\"\"\n.. warning:: `logging` package has been renamed to `loggers` since v0.6.1 and will be removed in v0.8.0\n\"\"\"\n\nimport warnings\n\nwarnings.warn(\"`logging` package has been renamed to `loggers` since v0.6.1\"\n \" and will be removed in v0.8.0\", DeprecationWarning)\n\nfrom pytorch_lightning.loggers import * # noqa: F403\nfrom pytorch_lightning.loggers import ( # noqa: E402\n base, comet, mlflow, neptune, tensorboard, test_tube, wandb\n)\n", "path": "pytorch_lightning/logging/__init__.py"}]} | 806 | 138 |
gh_patches_debug_6678 | rasdani/github-patches | git_diff | psychopy__psychopy-2734 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[bug] visual.Rect component does not recalculate vertices after change of size, width and height properties
This results in an inability to update the width and height of a Rect during the main loop. I have noticed that the Rect class was updated (commit from 14 October), but this update made it unusable. The fix is simple: update the vertices after setting the size, and set self._needVertexUpdate to True so the updated shape is redrawn.
</issue>
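A standalone toy illustration of the dirty-flag behaviour the report asks for (this is not PsychoPy's actual `Rect` class, just the caching pattern involved):
```python
import numpy as np

class ToyRect:
    """Toy stand-in for a stimulus whose vertices are cached until marked stale."""
    def __init__(self, width=0.5, height=0.5):
        self._size = np.array([width, height], float)
        self._vertices = None
        self._needVertexUpdate = True

    @property
    def size(self):
        return self._size

    @size.setter
    def size(self, value):
        self._size = np.array(value, float)
        self._needVertexUpdate = True  # the step the report says is missing

    @property
    def vertices(self):
        if self._needVertexUpdate:
            w, h = self._size
            self._vertices = 0.5 * np.array([[-w, h], [w, h], [w, -h], [-w, -h]])
            self._needVertexUpdate = False
        return self._vertices

r = ToyRect()
r.size = (2.0, 1.0)
print(r.vertices)  # recomputed for the new size because the flag was set
```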
<code>
[start of psychopy/visual/rect.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 """Creates a rectangle of given width and height as a special case of a
5 :class:`~psychopy.visual.ShapeStim`"""
6
7 # Part of the PsychoPy library
8 # Copyright (C) 2002-2018 Jonathan Peirce (C) 2019 Open Science Tools Ltd.
9 # Distributed under the terms of the GNU General Public License (GPL).
10
11 from __future__ import absolute_import, print_function
12
13 import numpy as np
14
15 import psychopy # so we can get the __path__
16 from psychopy.visual.shape import BaseShapeStim
17 from psychopy.tools.attributetools import attributeSetter, setAttribute
18
19
20 class Rect(BaseShapeStim):
21 """Creates a rectangle of given width and height as a special case of a
22 :class:`~psychopy.visual.ShapeStim`
23
24 (New in version 1.72.00)
25
26 Attributes
27 ----------
28 width, height : float or int
29 The width and height of the rectangle. Values are aliased with fields
30 in the `size` attribute. Use these values to adjust the size of the
31 rectangle in a single dimension after initialization.
32
33 """
34 def __init__(self,
35 win,
36 width=.5,
37 height=.5,
38 autoLog=None,
39 units='',
40 lineWidth=1.5,
41 lineColor='white',
42 lineColorSpace='rgb',
43 fillColor=None,
44 fillColorSpace='rgb',
45 pos=(0, 0),
46 size=None,
47 ori=0.0,
48 opacity=1.0,
49 contrast=1.0,
50 depth=0,
51 interpolate=True,
52 name=None,
53 autoDraw=False):
54 """
55 Parameters
56 ----------
57 win : `~psychopy.visual.Window`
58 Window object to be associated with this stimuli.
59 width, height : float or int
60 The width and height of the rectangle. *DEPRECATED* use `size`
61 to define the dimensions of the rectangle on initialization. If
62 `size` is specified the values of `width` and `height` are
63 ignored. This is to provide legacy compatibility for existing
64 applications.
65 size : array_like, float or int
66 Width and height of the rectangle as (w, h) or [w, h]. If a single
67 value is provided, the width and height will be set to the same
68 specified value. If `None` is specified, the `size` will be set
69 with values passed to `width` and `height`.
70
71 """
72 # width and height attributes, these are later aliased with `size`
73 self.__dict__['width'] = float(width)
74 self.__dict__['height'] = float(height)
75
76 # If the size argument was specified, override values of width and
77 # height, this is to maintain legacy compatibility. Args width and
78 # height should be deprecated in later releases.
79 if size is None:
80 size = (self.__dict__['width'],
81 self.__dict__['height'])
82
83 # vertices for rectangle, CCW winding order
84 vertices = np.array([[-.5, .5],
85 [ .5, .5],
86 [ .5, -.5],
87 [-.5, -.5]])
88
89 super(Rect, self).__init__(
90 win,
91 units=units,
92 lineWidth=lineWidth,
93 lineColor=lineColor,
94 lineColorSpace=lineColorSpace,
95 fillColor=fillColor,
96 fillColorSpace=fillColorSpace,
97 vertices=vertices,
98 closeShape=True,
99 pos=pos,
100 size=size,
101 ori=ori,
102 opacity=opacity,
103 contrast=contrast,
104 depth=depth,
105 interpolate=interpolate,
106 name=name,
107 autoLog=autoLog,
108 autoDraw=autoDraw)
109
110 @attributeSetter
111 def size(self, value):
112 """array-like.
113 Size of the rectangle (`width` and `height`).
114 """
115 # Needed to override `size` to ensure `width` and `height` attrs
116 # are updated when it changes.
117 self.__dict__['size'] = np.array(value, float)
118
119 width, height = self.__dict__['size']
120 self.__dict__['width'] = width
121 self.__dict__['height'] = height
122
123 def setSize(self, size, operation='', log=None):
124 """Usually you can use 'stim.attribute = value' syntax instead,
125 but use this method if you need to suppress the log message
126
127 :ref:`Operations <attrib-operations>` supported.
128 """
129 setAttribute(self, 'size', size, log, operation)
130
131 @attributeSetter
132 def width(self, value):
133 """int or float.
134 Width of the Rectangle (in its respective units, if specified).
135
136 :ref:`Operations <attrib-operations>` supported.
137 """
138 self.__dict__['width'] = float(value)
139 self.size = (self.__dict__['width'], self.size[1])
140
141 def setWidth(self, width, operation='', log=None):
142 """Usually you can use 'stim.attribute = value' syntax instead,
143 but use this method if you need to suppress the log message
144 """
145 setAttribute(self, 'width', width, log, operation)
146
147 @attributeSetter
148 def height(self, value):
149 """int or float.
150 Height of the Rectangle (in its respective units, if specified).
151
152 :ref:`Operations <attrib-operations>` supported.
153 """
154 self.__dict__['height'] = float(value)
155 self.size = (self.size[0], self.__dict__['height'])
156
157 def setHeight(self, height, operation='', log=None):
158 """Usually you can use 'stim.attribute = value' syntax instead,
159 but use this method if you need to suppress the log message
160 """
161 setAttribute(self, 'height', height, log, operation)
162
[end of psychopy/visual/rect.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/psychopy/visual/rect.py b/psychopy/visual/rect.py
--- a/psychopy/visual/rect.py
+++ b/psychopy/visual/rect.py
@@ -120,6 +120,8 @@
self.__dict__['width'] = width
self.__dict__['height'] = height
+ self._needVertexUpdate = True
+
def setSize(self, size, operation='', log=None):
"""Usually you can use 'stim.attribute = value' syntax instead,
but use this method if you need to suppress the log message
| {"golden_diff": "diff --git a/psychopy/visual/rect.py b/psychopy/visual/rect.py\n--- a/psychopy/visual/rect.py\n+++ b/psychopy/visual/rect.py\n@@ -120,6 +120,8 @@\n self.__dict__['width'] = width\n self.__dict__['height'] = height\n \n+ self._needVertexUpdate = True\n+\n def setSize(self, size, operation='', log=None):\n \"\"\"Usually you can use 'stim.attribute = value' syntax instead,\n but use this method if you need to suppress the log message\n", "issue": "[bug] visual.Rect component does not recalculate vertices after change of size, width and height properties\nWhich results in inability to update width and height of Rect during main loop. I have noticed that Rect class was updated (commit from 14 october), but this update made it unusable. Fix is simple, update vertices after setting size and set self._needVertexUpdate to True to enable redrawing updated shape.\r\n \n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Creates a rectangle of given width and height as a special case of a\n:class:`~psychopy.visual.ShapeStim`\"\"\"\n\n# Part of the PsychoPy library\n# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019 Open Science Tools Ltd.\n# Distributed under the terms of the GNU General Public License (GPL).\n\nfrom __future__ import absolute_import, print_function\n\nimport numpy as np\n\nimport psychopy # so we can get the __path__\nfrom psychopy.visual.shape import BaseShapeStim\nfrom psychopy.tools.attributetools import attributeSetter, setAttribute\n\n\nclass Rect(BaseShapeStim):\n \"\"\"Creates a rectangle of given width and height as a special case of a\n :class:`~psychopy.visual.ShapeStim`\n\n (New in version 1.72.00)\n\n Attributes\n ----------\n width, height : float or int\n The width and height of the rectangle. Values are aliased with fields\n in the `size` attribute. Use these values to adjust the size of the\n rectangle in a single dimension after initialization.\n\n \"\"\"\n def __init__(self,\n win,\n width=.5,\n height=.5,\n autoLog=None,\n units='',\n lineWidth=1.5,\n lineColor='white',\n lineColorSpace='rgb',\n fillColor=None,\n fillColorSpace='rgb',\n pos=(0, 0),\n size=None,\n ori=0.0,\n opacity=1.0,\n contrast=1.0,\n depth=0,\n interpolate=True,\n name=None,\n autoDraw=False):\n \"\"\"\n Parameters\n ----------\n win : `~psychopy.visual.Window`\n Window object to be associated with this stimuli.\n width, height : float or int\n The width and height of the rectangle. *DEPRECATED* use `size`\n to define the dimensions of the rectangle on initialization. If\n `size` is specified the values of `width` and `height` are\n ignored. This is to provide legacy compatibility for existing\n applications.\n size : array_like, float or int\n Width and height of the rectangle as (w, h) or [w, h]. If a single\n value is provided, the width and height will be set to the same\n specified value. If `None` is specified, the `size` will be set\n with values passed to `width` and `height`.\n\n \"\"\"\n # width and height attributes, these are later aliased with `size`\n self.__dict__['width'] = float(width)\n self.__dict__['height'] = float(height)\n\n # If the size argument was specified, override values of width and\n # height, this is to maintain legacy compatibility. 
Args width and\n # height should be deprecated in later releases.\n if size is None:\n size = (self.__dict__['width'],\n self.__dict__['height'])\n\n # vertices for rectangle, CCW winding order\n vertices = np.array([[-.5, .5],\n [ .5, .5],\n [ .5, -.5],\n [-.5, -.5]])\n\n super(Rect, self).__init__(\n win,\n units=units,\n lineWidth=lineWidth,\n lineColor=lineColor,\n lineColorSpace=lineColorSpace,\n fillColor=fillColor,\n fillColorSpace=fillColorSpace,\n vertices=vertices,\n closeShape=True,\n pos=pos,\n size=size,\n ori=ori,\n opacity=opacity,\n contrast=contrast,\n depth=depth,\n interpolate=interpolate,\n name=name,\n autoLog=autoLog,\n autoDraw=autoDraw)\n\n @attributeSetter\n def size(self, value):\n \"\"\"array-like.\n Size of the rectangle (`width` and `height`).\n \"\"\"\n # Needed to override `size` to ensure `width` and `height` attrs\n # are updated when it changes.\n self.__dict__['size'] = np.array(value, float)\n\n width, height = self.__dict__['size']\n self.__dict__['width'] = width\n self.__dict__['height'] = height\n\n def setSize(self, size, operation='', log=None):\n \"\"\"Usually you can use 'stim.attribute = value' syntax instead,\n but use this method if you need to suppress the log message\n\n :ref:`Operations <attrib-operations>` supported.\n \"\"\"\n setAttribute(self, 'size', size, log, operation)\n\n @attributeSetter\n def width(self, value):\n \"\"\"int or float.\n Width of the Rectangle (in its respective units, if specified).\n\n :ref:`Operations <attrib-operations>` supported.\n \"\"\"\n self.__dict__['width'] = float(value)\n self.size = (self.__dict__['width'], self.size[1])\n\n def setWidth(self, width, operation='', log=None):\n \"\"\"Usually you can use 'stim.attribute = value' syntax instead,\n but use this method if you need to suppress the log message\n \"\"\"\n setAttribute(self, 'width', width, log, operation)\n\n @attributeSetter\n def height(self, value):\n \"\"\"int or float.\n Height of the Rectangle (in its respective units, if specified).\n\n :ref:`Operations <attrib-operations>` supported.\n \"\"\"\n self.__dict__['height'] = float(value)\n self.size = (self.size[0], self.__dict__['height'])\n\n def setHeight(self, height, operation='', log=None):\n \"\"\"Usually you can use 'stim.attribute = value' syntax instead,\n but use this method if you need to suppress the log message\n \"\"\"\n setAttribute(self, 'height', height, log, operation)\n", "path": "psychopy/visual/rect.py"}]} | 2,286 | 132 |
gh_patches_debug_22721 | rasdani/github-patches | git_diff | cookiecutter__cookiecutter-825 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
.cookiecutterrc and cookiecutters_dir not working as I expected
Hi,
Here's the setup:
- I have a ~/.cookiecutterrc as talked about here: http://cookiecutter.readthedocs.org/en/latest/advanced_usage.html
- I also have a cookiecutters_dir at ~/.cookiecutters/ with the subdirectory cookiecutter-pypackage/.
When I try to run "cookiecutter cookiecutter-pypackage/" in ~/Projects/, I get the following error
```
Traceback (most recent call last):
File "/opt/anaconda/bin/cookiecutter", line 9, in <module>
load_entry_point('cookiecutter==0.9.0', 'console_scripts', 'cookiecutter')()
File "/opt/anaconda/lib/python2.7/site-packages/click-3.3-py2.7.egg/click/core.py", line 610, in __call__
return self.main(*args, **kwargs)
File "/opt/anaconda/lib/python2.7/site-packages/click-3.3-py2.7.egg/click/core.py", line 590, in main
rv = self.invoke(ctx)
File "/opt/anaconda/lib/python2.7/site-packages/click-3.3-py2.7.egg/click/core.py", line 782, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/opt/anaconda/lib/python2.7/site-packages/click-3.3-py2.7.egg/click/core.py", line 416, in invoke
return callback(*args, **kwargs)
File "/Users/alexp/Projects/cookiecutter/cookiecutter/cli.py", line 70, in main
cookiecutter(template, checkout, no_input)
File "/Users/alexp/Projects/cookiecutter/cookiecutter/main.py", line 95, in cookiecutter
extra_context=extra_context,
File "/Users/alexp/Projects/cookiecutter/cookiecutter/generate.py", line 43, in generate_context
file_handle = open(context_file)
IOError: [Errno 2] No such file or directory: u'cookiecutter-pypackage/cookiecutter.json'
```
This error shows up whether I do a pip install or use the git repo locally. Naturally, it makes a bit of sense: there is no directory ~/Projects/cookiecutter-pypackage/.
However, and perhaps I'm making a poor assumption about usage, it seems to me if I clone or otherwise create a cookiecutter and it's sitting in cookiecutters_dir, it'd be nice to just refer to it as I did above. For my usage, if I create a cookiecutter, I don't particularly want it sitting around a Projects directory, especially if I have multiple project directories for personal and organizational purposes.
In order to do this, I added three lines to main.py in my fork (see below) and it seems to work. I did it as an `elif` to try to preserve the possibility of a lack of a cookiecutters_dir. I have not written a test for this and admittedly I don't really know how. I will likely just use my fork with this modification going forward, but I wanted to let the developer crew know about this.
Cheers.
```
# cookiecutter.main
...
# TODO: find a better way to tell if it's a repo URL
if 'git@' in template or 'https://' in template:
repo_dir = clone(
repo_url=template,
checkout=checkout,
clone_to_dir=config_dict['cookiecutters_dir'],
no_input=no_input
)
#### Added these three lines
elif 'cookiecutters_dir' in config_dict:
cookiecutters_dir = os.path.expanduser(config_dict['cookiecutters_dir'])
repo_dir = os.path.join(cookiecutters_dir,template)
else:
# If it's a local repo, no need to clone or copy to your
# cookiecutters_dir
repo_dir = template
```
</issue>
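A standalone sketch of the lookup order being requested: try the reference as a local path first, then under `cookiecutters_dir`. The helper name and signature here are hypothetical, not cookiecutter's API:
```python
import os

def resolve_template_dir(template, cookiecutters_dir=None):
    """Return the first candidate directory that contains a cookiecutter.json."""
    candidates = [template]
    if cookiecutters_dir:
        candidates.append(os.path.join(os.path.expanduser(cookiecutters_dir), template))
    for candidate in candidates:
        if os.path.isfile(os.path.join(candidate, 'cookiecutter.json')):
            return candidate
    raise FileNotFoundError('No cookiecutter.json found in: ' + ', '.join(candidates))
```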
<code>
[start of cookiecutter/repository.py]
1 # -*- coding: utf-8 -*-
2
3 """Cookiecutter repository functions."""
4 from __future__ import unicode_literals
5 import os
6 import re
7
8 from .exceptions import RepositoryNotFound
9 from .vcs import clone
10
11 REPO_REGEX = re.compile(r"""
12 (?x)
13 ((((git|hg)\+)?(git|ssh|https?):(//)?) # something like git:// ssh:// etc.
14 | # or
15 (\w+@[\w\.]+) # something like user@...
16 )
17 """)
18
19
20 def is_repo_url(value):
21 """Return True if value is a repository URL."""
22 return bool(REPO_REGEX.match(value))
23
24
25 def expand_abbreviations(template, abbreviations):
26 """
27 Expand abbreviations in a template name.
28
29 :param template: The project template name.
30 :param abbreviations: Abbreviation definitions.
31 """
32 if template in abbreviations:
33 return abbreviations[template]
34
35 # Split on colon. If there is no colon, rest will be empty
36 # and prefix will be the whole template
37 prefix, sep, rest = template.partition(':')
38 if prefix in abbreviations:
39 return abbreviations[prefix].format(rest)
40
41 return template
42
43
44 def repository_has_cookiecutter_json(repo_directory):
45 """Determines if `repo_directory` contains a `cookiecutter.json` file.
46
47 :param repo_directory: The candidate repository directory.
48 :return: True if the `repo_directory` is valid, else False.
49 """
50 repo_directory_exists = os.path.isdir(repo_directory)
51
52 repo_config_exists = os.path.isfile(
53 os.path.join(repo_directory, 'cookiecutter.json')
54 )
55 return repo_directory_exists and repo_config_exists
56
57
58 def determine_repo_dir(template, abbreviations, clone_to_dir, checkout,
59 no_input):
60 """
61 Locate the repository directory from a template reference.
62
63 Applies repository abbreviations to the template reference.
64 If the template refers to a repository URL, clone it.
65 If the template is a path to a local repository, use it.
66
67 :param template: A directory containing a project template directory,
68 or a URL to a git repository.
69 :param abbreviations: A dictionary of repository abbreviation
70 definitions.
71 :param clone_to_dir: The directory to clone the repository into.
72 :param checkout: The branch, tag or commit ID to checkout after clone.
73 :param no_input: Prompt the user at command line for manual configuration?
74 :return: The cookiecutter template directory
75 :raises: `RepositoryNotFound` if a repository directory could not be found.
76 """
77 template = expand_abbreviations(template, abbreviations)
78
79 if is_repo_url(template):
80 repo_dir = clone(
81 repo_url=template,
82 checkout=checkout,
83 clone_to_dir=clone_to_dir,
84 no_input=no_input,
85 )
86 else:
87 # If it's a local repo, no need to clone or copy to your
88 # cookiecutters_dir
89 repo_dir = template
90
91 if repository_has_cookiecutter_json(repo_dir):
92 return repo_dir
93
94 raise RepositoryNotFound(
95 'The repository {} could not be located or does not contain '
96 'a "cookiecutter.json" file.'.format(repo_dir)
97 )
98
[end of cookiecutter/repository.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cookiecutter/repository.py b/cookiecutter/repository.py
--- a/cookiecutter/repository.py
+++ b/cookiecutter/repository.py
@@ -77,21 +77,27 @@
template = expand_abbreviations(template, abbreviations)
if is_repo_url(template):
- repo_dir = clone(
+ cloned_repo = clone(
repo_url=template,
checkout=checkout,
clone_to_dir=clone_to_dir,
no_input=no_input,
)
+ repository_candidates = [cloned_repo]
else:
- # If it's a local repo, no need to clone or copy to your
- # cookiecutters_dir
- repo_dir = template
+ repository_candidates = [
+ template,
+ os.path.join(clone_to_dir, template)
+ ]
- if repository_has_cookiecutter_json(repo_dir):
- return repo_dir
+ for repo_candidate in repository_candidates:
+ if repository_has_cookiecutter_json(repo_candidate):
+ return repo_candidate
raise RepositoryNotFound(
- 'The repository {} could not be located or does not contain '
- 'a "cookiecutter.json" file.'.format(repo_dir)
+ 'A valid repository for "{}" could not be found in the following '
+ 'locations:\n{}'.format(
+ template,
+ '\n'.join(repository_candidates)
+ )
)
| {"golden_diff": "diff --git a/cookiecutter/repository.py b/cookiecutter/repository.py\n--- a/cookiecutter/repository.py\n+++ b/cookiecutter/repository.py\n@@ -77,21 +77,27 @@\n template = expand_abbreviations(template, abbreviations)\n \n if is_repo_url(template):\n- repo_dir = clone(\n+ cloned_repo = clone(\n repo_url=template,\n checkout=checkout,\n clone_to_dir=clone_to_dir,\n no_input=no_input,\n )\n+ repository_candidates = [cloned_repo]\n else:\n- # If it's a local repo, no need to clone or copy to your\n- # cookiecutters_dir\n- repo_dir = template\n+ repository_candidates = [\n+ template,\n+ os.path.join(clone_to_dir, template)\n+ ]\n \n- if repository_has_cookiecutter_json(repo_dir):\n- return repo_dir\n+ for repo_candidate in repository_candidates:\n+ if repository_has_cookiecutter_json(repo_candidate):\n+ return repo_candidate\n \n raise RepositoryNotFound(\n- 'The repository {} could not be located or does not contain '\n- 'a \"cookiecutter.json\" file.'.format(repo_dir)\n+ 'A valid repository for \"{}\" could not be found in the following '\n+ 'locations:\\n{}'.format(\n+ template,\n+ '\\n'.join(repository_candidates)\n+ )\n )\n", "issue": ".cookiecutterrc and cookiecutters_dir not working as I expected\nHi,\nHere's the setup:\n- I have a ~/.cookiecutterrc as talked about here: http://cookiecutter.readthedocs.org/en/latest/advanced_usage.html\n- I also have a cookiecutters_dir at ~/.cookiecutters/ with the subdirectory cookiecutter-pypackage/.\n\nWhen I try to run \"cookiecutter cookiecutter-pypackage/\" in ~/Projects/, I get the following error\n\n```\nTraceback (most recent call last):\n File \"/opt/anaconda/bin/cookiecutter\", line 9, in <module>\n load_entry_point('cookiecutter==0.9.0', 'console_scripts', 'cookiecutter')()\n File \"/opt/anaconda/lib/python2.7/site-packages/click-3.3-py2.7.egg/click/core.py\", line 610, in __call__\n return self.main(*args, **kwargs)\n File \"/opt/anaconda/lib/python2.7/site-packages/click-3.3-py2.7.egg/click/core.py\", line 590, in main\n rv = self.invoke(ctx)\n File \"/opt/anaconda/lib/python2.7/site-packages/click-3.3-py2.7.egg/click/core.py\", line 782, in invoke\n return ctx.invoke(self.callback, **ctx.params)\n File \"/opt/anaconda/lib/python2.7/site-packages/click-3.3-py2.7.egg/click/core.py\", line 416, in invoke\n return callback(*args, **kwargs)\n File \"/Users/alexp/Projects/cookiecutter/cookiecutter/cli.py\", line 70, in main\n cookiecutter(template, checkout, no_input)\n File \"/Users/alexp/Projects/cookiecutter/cookiecutter/main.py\", line 95, in cookiecutter\n extra_context=extra_context,\n File \"/Users/alexp/Projects/cookiecutter/cookiecutter/generate.py\", line 43, in generate_context\n file_handle = open(context_file)\nIOError: [Errno 2] No such file or directory: u'cookiecutter-pypackage/cookiecutter.json'\n```\n\nThis error shows up if I either do pip install or with the git repo locally. Naturally, it makes a bit of sense. There is no directory ~/Projects/cookiecutter-pypackage/. \n\nHowever, and perhaps I'm making a poor assumption about usage, it seems to me if I clone or otherwise create a cookiecutter and it's sitting in cookiecutters_dir, it'd be nice to just refer to it as I did above. For my usage, if I create a cookiecutter, I don't particularly want it sitting around a Projects directory, especially if I have multiple project directories for personal and organizational purposes.\n\nIn order to do this, I added three lines to main.py in my fork (see below) and it seems to work. 
I did it as an `elif` to try to preserve the possibility of a lack of a cookiecutters_dir. I have not written a test for this and admittedly I don't really know how. I will likely just use my fork with this modificationgoing forward but I wanted to let the developer crew know about this. \n\nCheers.\n\n```\n# cookiecutter.main\n...\n# TODO: find a better way to tell if it's a repo URL\nif 'git@' in template or 'https://' in template:\n repo_dir = clone(\n repo_url=template,\n checkout=checkout,\n clone_to_dir=config_dict['cookiecutters_dir'],\n no_input=no_input\n )\n#### Added these three lines\nelif 'cookiecutters_dir' in config_dict:\n cookiecutters_dir = os.path.expanduser(config_dict['cookiecutters_dir'])\n repo_dir = os.path.join(cookiecutters_dir,template)\nelse:\n # If it's a local repo, no need to clone or copy to your\n # cookiecutters_dir\n repo_dir = template\n```\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Cookiecutter repository functions.\"\"\"\nfrom __future__ import unicode_literals\nimport os\nimport re\n\nfrom .exceptions import RepositoryNotFound\nfrom .vcs import clone\n\nREPO_REGEX = re.compile(r\"\"\"\n(?x)\n((((git|hg)\\+)?(git|ssh|https?):(//)?) # something like git:// ssh:// etc.\n | # or\n (\\w+@[\\w\\.]+) # something like user@...\n)\n\"\"\")\n\n\ndef is_repo_url(value):\n \"\"\"Return True if value is a repository URL.\"\"\"\n return bool(REPO_REGEX.match(value))\n\n\ndef expand_abbreviations(template, abbreviations):\n \"\"\"\n Expand abbreviations in a template name.\n\n :param template: The project template name.\n :param abbreviations: Abbreviation definitions.\n \"\"\"\n if template in abbreviations:\n return abbreviations[template]\n\n # Split on colon. If there is no colon, rest will be empty\n # and prefix will be the whole template\n prefix, sep, rest = template.partition(':')\n if prefix in abbreviations:\n return abbreviations[prefix].format(rest)\n\n return template\n\n\ndef repository_has_cookiecutter_json(repo_directory):\n \"\"\"Determines if `repo_directory` contains a `cookiecutter.json` file.\n\n :param repo_directory: The candidate repository directory.\n :return: True if the `repo_directory` is valid, else False.\n \"\"\"\n repo_directory_exists = os.path.isdir(repo_directory)\n\n repo_config_exists = os.path.isfile(\n os.path.join(repo_directory, 'cookiecutter.json')\n )\n return repo_directory_exists and repo_config_exists\n\n\ndef determine_repo_dir(template, abbreviations, clone_to_dir, checkout,\n no_input):\n \"\"\"\n Locate the repository directory from a template reference.\n\n Applies repository abbreviations to the template reference.\n If the template refers to a repository URL, clone it.\n If the template is a path to a local repository, use it.\n\n :param template: A directory containing a project template directory,\n or a URL to a git repository.\n :param abbreviations: A dictionary of repository abbreviation\n definitions.\n :param clone_to_dir: The directory to clone the repository into.\n :param checkout: The branch, tag or commit ID to checkout after clone.\n :param no_input: Prompt the user at command line for manual configuration?\n :return: The cookiecutter template directory\n :raises: `RepositoryNotFound` if a repository directory could not be found.\n \"\"\"\n template = expand_abbreviations(template, abbreviations)\n\n if is_repo_url(template):\n repo_dir = clone(\n repo_url=template,\n checkout=checkout,\n clone_to_dir=clone_to_dir,\n no_input=no_input,\n )\n else:\n # If it's a local repo, 
no need to clone or copy to your\n # cookiecutters_dir\n repo_dir = template\n\n if repository_has_cookiecutter_json(repo_dir):\n return repo_dir\n\n raise RepositoryNotFound(\n 'The repository {} could not be located or does not contain '\n 'a \"cookiecutter.json\" file.'.format(repo_dir)\n )\n", "path": "cookiecutter/repository.py"}]} | 2,317 | 309 |
gh_patches_debug_12348 | rasdani/github-patches | git_diff | pyodide__pyodide-74 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve parsing of result line
The parsing of the input Python to find the last line which will be evaluated (rather than executed) to provide the result is probably a little brittle in certain corner cases. We should look at what IPython does here and copy that.
</issue>
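One common way to do this, close in spirit to IPython's handling (a sketch of the idea, with a hypothetical function name): parse the source with `ast`, split off a trailing expression statement, `exec` the rest and `eval` the expression:
```python
import ast

def eval_code_sketch(code, ns):
    """Run `code` in namespace `ns`; return the value of a trailing expression, if any."""
    mod = ast.parse(code)
    expr = None
    if mod.body and isinstance(mod.body[-1], ast.Expr):
        expr = ast.Expression(mod.body.pop().value)
    if mod.body:
        exec(compile(mod, '<exec>', 'exec'), ns, ns)
    if expr is not None:
        return eval(compile(expr, '<eval>', 'eval'), ns, ns)
    return None

ns = {}
print(eval_code_sketch("x = 2\nx + 40", ns))  # 42
```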
<code>
[start of src/pyodide.py]
1 """
2 A library of helper utilities for connecting Python to the browser environment.
3 """
4
5 from js import XMLHttpRequest
6
7 import io
8
9
10 def open_url(url):
11 """
12 Fetches a given *url* and returns a io.StringIO to access its contents.
13 """
14 req = XMLHttpRequest.new()
15 req.open('GET', url, False)
16 req.send(None)
17 return io.StringIO(req.response)
18
19
20 __all__ = ['open_url']
21
[end of src/pyodide.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/pyodide.py b/src/pyodide.py
--- a/src/pyodide.py
+++ b/src/pyodide.py
@@ -4,6 +4,7 @@
from js import XMLHttpRequest
+import ast
import io
@@ -17,4 +18,23 @@
return io.StringIO(req.response)
-__all__ = ['open_url']
+def eval_code(code, ns):
+ """
+ Runs a string of code, the last part of which may be an expression.
+ """
+ mod = ast.parse(code)
+ if isinstance(mod.body[-1], ast.Expr):
+ expr = ast.Expression(mod.body[-1].value)
+ del mod.body[-1]
+ else:
+ expr = None
+
+ if len(mod.body):
+ exec(compile(mod, '<exec>', mode='exec'), ns, ns)
+ if expr is not None:
+ return eval(compile(expr, '<eval>', mode='eval'), ns, ns)
+ else:
+ return None
+
+
+__all__ = ['open_url', 'eval_code']
| {"golden_diff": "diff --git a/src/pyodide.py b/src/pyodide.py\n--- a/src/pyodide.py\n+++ b/src/pyodide.py\n@@ -4,6 +4,7 @@\n \n from js import XMLHttpRequest\n \n+import ast\n import io\n \n \n@@ -17,4 +18,23 @@\n return io.StringIO(req.response)\n \n \n-__all__ = ['open_url']\n+def eval_code(code, ns):\n+ \"\"\"\n+ Runs a string of code, the last part of which may be an expression.\n+ \"\"\"\n+ mod = ast.parse(code)\n+ if isinstance(mod.body[-1], ast.Expr):\n+ expr = ast.Expression(mod.body[-1].value)\n+ del mod.body[-1]\n+ else:\n+ expr = None\n+\n+ if len(mod.body):\n+ exec(compile(mod, '<exec>', mode='exec'), ns, ns)\n+ if expr is not None:\n+ return eval(compile(expr, '<eval>', mode='eval'), ns, ns)\n+ else:\n+ return None\n+\n+\n+__all__ = ['open_url', 'eval_code']\n", "issue": "Improve parsing of result line\nThe parsing of the input Python to find the last line which will be evaluated (rather than executed) to provide the result is probably a little brittle in certain corner cases. We should look at what IPython does here and copy that.\n", "before_files": [{"content": "\"\"\"\nA library of helper utilities for connecting Python to the browser environment.\n\"\"\"\n\nfrom js import XMLHttpRequest\n\nimport io\n\n\ndef open_url(url):\n \"\"\"\n Fetches a given *url* and returns a io.StringIO to access its contents.\n \"\"\"\n req = XMLHttpRequest.new()\n req.open('GET', url, False)\n req.send(None)\n return io.StringIO(req.response)\n\n\n__all__ = ['open_url']\n", "path": "src/pyodide.py"}]} | 717 | 247 |
gh_patches_debug_11998 | rasdani/github-patches | git_diff | pyodide__pyodide-931 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CLAPACK.js is missing from the dev branch
For some reason, I started to see the CLAPACK.js missing error when using pyodide from https://cdn.jsdelivr.net/pyodide/dev/full/:
```
Couldn't load package from URL https://cdn.jsdelivr.net/pyodide/dev/full/CLAPACK.js
```
</issue>
<code>
[start of pyodide_build/buildall.py]
1 #!/usr/bin/env python3
2
3 """
4 Build all of the packages in a given directory.
5 """
6
7 import argparse
8 from functools import total_ordering
9 import json
10 from pathlib import Path
11 from queue import Queue, PriorityQueue
12 import shutil
13 import subprocess
14 import sys
15 from threading import Thread
16 from time import sleep
17 from typing import Dict, Set, Optional, List
18
19 from . import common
20
21
22 @total_ordering
23 class Package:
24 def __init__(self, pkgdir: Path):
25 self.pkgdir = pkgdir
26
27 pkgpath = pkgdir / "meta.yaml"
28 if not pkgpath.is_file():
29 raise ValueError(f"Directory {pkgdir} does not contain meta.yaml")
30
31 self.meta: dict = common.parse_package(pkgpath)
32 self.name: str = self.meta["package"]["name"]
33 self.library: bool = self.meta.get("build", {}).get("library", False)
34
35 assert self.name == pkgdir.stem
36
37 self.dependencies: List[str] = self.meta.get("requirements", {}).get("run", [])
38 self.unbuilt_dependencies: Set[str] = set(self.dependencies)
39 self.dependents: Set[str] = set()
40
41 def build(self, outputdir: Path, args) -> None:
42 with open(self.pkgdir / "build.log", "w") as f:
43 if self.library:
44 p = subprocess.run(
45 ["make"],
46 cwd=self.pkgdir,
47 check=False,
48 stdout=f,
49 stderr=subprocess.STDOUT,
50 )
51 else:
52 p = subprocess.run(
53 [
54 sys.executable,
55 "-m",
56 "pyodide_build",
57 "buildpkg",
58 str(self.pkgdir / "meta.yaml"),
59 "--package_abi",
60 str(args.package_abi),
61 "--cflags",
62 args.cflags,
63 "--ldflags",
64 args.ldflags,
65 "--target",
66 args.target,
67 "--install-dir",
68 args.install_dir,
69 ],
70 check=False,
71 stdout=f,
72 stderr=subprocess.STDOUT,
73 )
74
75 with open(self.pkgdir / "build.log", "r") as f:
76 shutil.copyfileobj(f, sys.stdout)
77
78 p.check_returncode()
79
80 if not self.library:
81 shutil.copyfile(
82 self.pkgdir / "build" / (self.name + ".data"),
83 outputdir / (self.name + ".data"),
84 )
85 shutil.copyfile(
86 self.pkgdir / "build" / (self.name + ".js"),
87 outputdir / (self.name + ".js"),
88 )
89
90 # We use this in the priority queue, which pops off the smallest element.
91 # So we want the smallest element to have the largest number of dependents
92 def __lt__(self, other) -> bool:
93 return len(self.dependents) > len(other.dependents)
94
95 def __eq__(self, other) -> bool:
96 return len(self.dependents) == len(other.dependents)
97
98
99 def generate_dependency_graph(
100 packages_dir: Path, package_list: Optional[str]
101 ) -> Dict[str, Package]:
102 """
103 This generates a dependency graph for the packages listed in package_list.
104 A node in the graph is a Package object defined above, which maintains a
105 list of dependencies and also dependents. That is, each node stores both
106 incoming and outgoing edges.
107
108 The dependencies and dependents are stored via their name, and we have a
109 lookup table pkg_map: Dict[str, Package] to look up the corresponding
110 Package object. The function returns pkg_map, which contains all packages
111 in the graph as its values.
112
113 Parameters:
114 - packages_dir: directory that contains packages
115 - package_list: set of packages to build. If None, then all packages in
116 packages_dir are compiled.
117
118 Returns:
119 - pkg_map: dictionary mapping package names to Package objects
120 """
121
122 pkg_map: Dict[str, Package] = {}
123
124 packages: Optional[Set[str]] = common._parse_package_subset(package_list)
125 if packages is None:
126 packages = set(
127 str(x) for x in packages_dir.iterdir() if (x / "meta.yaml").is_file()
128 )
129
130 while packages:
131 pkgname = packages.pop()
132
133 pkg = Package(packages_dir / pkgname)
134 pkg_map[pkg.name] = pkg
135
136 for dep in pkg.dependencies:
137 if pkg_map.get(dep) is None:
138 packages.add(dep)
139
140 # Compute dependents
141 for pkg in pkg_map.values():
142 for dep in pkg.dependencies:
143 pkg_map[dep].dependents.add(pkg.name)
144
145 return pkg_map
146
147
148 def build_from_graph(pkg_map: Dict[str, Package], outputdir: Path, args) -> None:
149 """
150 This builds packages in pkg_map in parallel, building at most args.n_jobs
151 packages at once.
152
153 We have a priority queue of packages we are ready to build (build_queue),
154 where a package is ready to build if all its dependencies are built. The
155 priority is based on the number of dependents --- we prefer to build
156 packages with more dependents first.
157
158 To build packages in parallel, we use a thread pool of args.n_jobs many
159 threads listening to build_queue. When the thread is free, it takes an
160 item off build_queue and builds it. Once the package is built, it sends the
161 package to the built_queue. The main thread listens to the built_queue and
162 checks if any of the dependents are ready to be built. If so, it add the
163 package to the build queue.
164 """
165
166 # Insert packages into build_queue. We *must* do this after counting
167 # dependents, because the ordering ought not to change after insertion.
168 build_queue: PriorityQueue = PriorityQueue()
169 for pkg in pkg_map.values():
170 if len(pkg.dependencies) == 0:
171 build_queue.put(pkg)
172
173 built_queue: Queue = Queue()
174
175 def builder(n):
176 print(f"Starting thread {n}")
177 while True:
178 pkg = build_queue.get()
179 print(f"Thread {n} building {pkg.name}")
180 try:
181 pkg.build(outputdir, args)
182 except Exception as e:
183 built_queue.put(e)
184 return
185
186 print(f"Thread {n} built {pkg.name}")
187 built_queue.put(pkg)
188 # Release the GIL so new packages get queued
189 sleep(0.01)
190
191 for n in range(0, args.n_jobs):
192 Thread(target=builder, args=(n + 1,), daemon=True).start()
193
194 num_built = 0
195 while num_built < len(pkg_map):
196 pkg = built_queue.get()
197 if isinstance(pkg, Exception):
198 raise pkg
199
200 num_built += 1
201
202 for _dependent in pkg.dependents:
203 dependent = pkg_map[_dependent]
204 dependent.unbuilt_dependencies.remove(pkg.name)
205 if len(dependent.unbuilt_dependencies) == 0:
206 build_queue.put(dependent)
207
208
209 def build_packages(packages_dir: Path, outputdir: Path, args) -> None:
210 pkg_map = generate_dependency_graph(packages_dir, args.only)
211
212 build_from_graph(pkg_map, outputdir, args)
213
214 # Build package.json data. The "test" package is built in a different way,
215 # so we hardcode its existence here.
216 #
217 # This is done last so the Makefile can use it as a completion token.
218 package_data: dict = {
219 "dependencies": {"test": []},
220 "import_name_to_package_name": {},
221 }
222
223 for name, pkg in pkg_map.items():
224 if pkg.library:
225 continue
226
227 package_data["dependencies"][name] = pkg.dependencies
228 for imp in pkg.meta.get("test", {}).get("imports", [name]):
229 package_data["import_name_to_package_name"][imp] = name
230
231 with open(outputdir / "packages.json", "w") as fd:
232 json.dump(package_data, fd)
233
234
235 def make_parser(parser):
236 parser.description = (
237 "Build all of the packages in a given directory\n\n"
238 "Unless the --only option is provided"
239 )
240 parser.add_argument(
241 "dir",
242 type=str,
243 nargs=1,
244 help="Input directory containing a tree of package definitions",
245 )
246 parser.add_argument(
247 "output",
248 type=str,
249 nargs=1,
250 help="Output directory in which to put all built packages",
251 )
252 parser.add_argument(
253 "--package_abi",
254 type=int,
255 required=True,
256 help="The ABI number for the packages to be built",
257 )
258 parser.add_argument(
259 "--cflags",
260 type=str,
261 nargs="?",
262 default=common.DEFAULTCFLAGS,
263 help="Extra compiling flags",
264 )
265 parser.add_argument(
266 "--ldflags",
267 type=str,
268 nargs="?",
269 default=common.DEFAULTLDFLAGS,
270 help="Extra linking flags",
271 )
272 parser.add_argument(
273 "--target",
274 type=str,
275 nargs="?",
276 default=common.TARGETPYTHON,
277 help="The path to the target Python installation",
278 )
279 parser.add_argument(
280 "--install-dir",
281 type=str,
282 nargs="?",
283 default="",
284 help=(
285 "Directory for installing built host packages. Defaults to setup.py "
286 "default. Set to 'skip' to skip installation. Installation is "
287 "needed if you want to build other packages that depend on this one."
288 ),
289 )
290 parser.add_argument(
291 "--only",
292 type=str,
293 nargs="?",
294 default=None,
295 help=(
296 "Only build the specified packages, provided as a comma " "separated list"
297 ),
298 )
299 parser.add_argument(
300 "--n-jobs",
301 type=int,
302 nargs="?",
303 default=4,
304 help="Number of packages to build in parallel",
305 )
306 return parser
307
308
309 def main(args):
310 packages_dir = Path(args.dir[0]).resolve()
311 outputdir = Path(args.output[0]).resolve()
312 build_packages(packages_dir, outputdir, args)
313
314
315 if __name__ == "__main__":
316 parser = make_parser(argparse.ArgumentParser())
317 args = parser.parse_args()
318 main(args)
319
[end of pyodide_build/buildall.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pyodide_build/buildall.py b/pyodide_build/buildall.py
--- a/pyodide_build/buildall.py
+++ b/pyodide_build/buildall.py
@@ -220,11 +220,15 @@
"import_name_to_package_name": {},
}
+ libraries = [pkg.name for pkg in pkg_map.values() if pkg.library]
+
for name, pkg in pkg_map.items():
if pkg.library:
continue
- package_data["dependencies"][name] = pkg.dependencies
+ package_data["dependencies"][name] = [
+ x for x in pkg.dependencies if x not in libraries
+ ]
for imp in pkg.meta.get("test", {}).get("imports", [name]):
package_data["import_name_to_package_name"][imp] = name
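To make the effect of this filter concrete, here is a hypothetical sketch with made-up package names. If `scipy` depended on the static library `CLAPACK`, the old `packages.json` listed `CLAPACK` as a loadable dependency, so the loader tried to fetch a `CLAPACK.js` that the build never emits (library packages are not copied out as `.js`/`.data` files). With the patch the library entry is filtered out of the dependency list:

```python
# Hypothetical data; package names are illustrative, not taken from the repo.
libraries = ["CLAPACK"]              # build-time libraries (no .js artifact)
dependencies = ["CLAPACK", "numpy"]  # raw run requirements of "scipy"

# Before the patch packages.json would contain ["CLAPACK", "numpy"];
# after the patch only loadable packages remain.
assert [x for x in dependencies if x not in libraries] == ["numpy"]
```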
| {"golden_diff": "diff --git a/pyodide_build/buildall.py b/pyodide_build/buildall.py\n--- a/pyodide_build/buildall.py\n+++ b/pyodide_build/buildall.py\n@@ -220,11 +220,15 @@\n \"import_name_to_package_name\": {},\n }\n \n+ libraries = [pkg.name for pkg in pkg_map.values() if pkg.library]\n+\n for name, pkg in pkg_map.items():\n if pkg.library:\n continue\n \n- package_data[\"dependencies\"][name] = pkg.dependencies\n+ package_data[\"dependencies\"][name] = [\n+ x for x in pkg.dependencies if x not in libraries\n+ ]\n for imp in pkg.meta.get(\"test\", {}).get(\"imports\", [name]):\n package_data[\"import_name_to_package_name\"][imp] = name\n", "issue": "CLAPACK.js is missing from the dev branch\nFor some reason, I started to see the CLAPACK.js missing error when using pyodide from https://cdn.jsdelivr.net/pyodide/dev/full/:\r\n\r\n```\r\nCouldn't load package from URL https://cdn.jsdelivr.net/pyodide/dev/full/CLAPACK.js\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n\"\"\"\nBuild all of the packages in a given directory.\n\"\"\"\n\nimport argparse\nfrom functools import total_ordering\nimport json\nfrom pathlib import Path\nfrom queue import Queue, PriorityQueue\nimport shutil\nimport subprocess\nimport sys\nfrom threading import Thread\nfrom time import sleep\nfrom typing import Dict, Set, Optional, List\n\nfrom . import common\n\n\n@total_ordering\nclass Package:\n def __init__(self, pkgdir: Path):\n self.pkgdir = pkgdir\n\n pkgpath = pkgdir / \"meta.yaml\"\n if not pkgpath.is_file():\n raise ValueError(f\"Directory {pkgdir} does not contain meta.yaml\")\n\n self.meta: dict = common.parse_package(pkgpath)\n self.name: str = self.meta[\"package\"][\"name\"]\n self.library: bool = self.meta.get(\"build\", {}).get(\"library\", False)\n\n assert self.name == pkgdir.stem\n\n self.dependencies: List[str] = self.meta.get(\"requirements\", {}).get(\"run\", [])\n self.unbuilt_dependencies: Set[str] = set(self.dependencies)\n self.dependents: Set[str] = set()\n\n def build(self, outputdir: Path, args) -> None:\n with open(self.pkgdir / \"build.log\", \"w\") as f:\n if self.library:\n p = subprocess.run(\n [\"make\"],\n cwd=self.pkgdir,\n check=False,\n stdout=f,\n stderr=subprocess.STDOUT,\n )\n else:\n p = subprocess.run(\n [\n sys.executable,\n \"-m\",\n \"pyodide_build\",\n \"buildpkg\",\n str(self.pkgdir / \"meta.yaml\"),\n \"--package_abi\",\n str(args.package_abi),\n \"--cflags\",\n args.cflags,\n \"--ldflags\",\n args.ldflags,\n \"--target\",\n args.target,\n \"--install-dir\",\n args.install_dir,\n ],\n check=False,\n stdout=f,\n stderr=subprocess.STDOUT,\n )\n\n with open(self.pkgdir / \"build.log\", \"r\") as f:\n shutil.copyfileobj(f, sys.stdout)\n\n p.check_returncode()\n\n if not self.library:\n shutil.copyfile(\n self.pkgdir / \"build\" / (self.name + \".data\"),\n outputdir / (self.name + \".data\"),\n )\n shutil.copyfile(\n self.pkgdir / \"build\" / (self.name + \".js\"),\n outputdir / (self.name + \".js\"),\n )\n\n # We use this in the priority queue, which pops off the smallest element.\n # So we want the smallest element to have the largest number of dependents\n def __lt__(self, other) -> bool:\n return len(self.dependents) > len(other.dependents)\n\n def __eq__(self, other) -> bool:\n return len(self.dependents) == len(other.dependents)\n\n\ndef generate_dependency_graph(\n packages_dir: Path, package_list: Optional[str]\n) -> Dict[str, Package]:\n \"\"\"\n This generates a dependency graph for the packages listed in package_list.\n A node in the graph is a 
Package object defined above, which maintains a\n list of dependencies and also dependents. That is, each node stores both\n incoming and outgoing edges.\n\n The dependencies and dependents are stored via their name, and we have a\n lookup table pkg_map: Dict[str, Package] to look up the corresponding\n Package object. The function returns pkg_map, which contains all packages\n in the graph as its values.\n\n Parameters:\n - packages_dir: directory that contains packages\n - package_list: set of packages to build. If None, then all packages in\n packages_dir are compiled.\n\n Returns:\n - pkg_map: dictionary mapping package names to Package objects\n \"\"\"\n\n pkg_map: Dict[str, Package] = {}\n\n packages: Optional[Set[str]] = common._parse_package_subset(package_list)\n if packages is None:\n packages = set(\n str(x) for x in packages_dir.iterdir() if (x / \"meta.yaml\").is_file()\n )\n\n while packages:\n pkgname = packages.pop()\n\n pkg = Package(packages_dir / pkgname)\n pkg_map[pkg.name] = pkg\n\n for dep in pkg.dependencies:\n if pkg_map.get(dep) is None:\n packages.add(dep)\n\n # Compute dependents\n for pkg in pkg_map.values():\n for dep in pkg.dependencies:\n pkg_map[dep].dependents.add(pkg.name)\n\n return pkg_map\n\n\ndef build_from_graph(pkg_map: Dict[str, Package], outputdir: Path, args) -> None:\n \"\"\"\n This builds packages in pkg_map in parallel, building at most args.n_jobs\n packages at once.\n\n We have a priority queue of packages we are ready to build (build_queue),\n where a package is ready to build if all its dependencies are built. The\n priority is based on the number of dependents --- we prefer to build\n packages with more dependents first.\n\n To build packages in parallel, we use a thread pool of args.n_jobs many\n threads listening to build_queue. When the thread is free, it takes an\n item off build_queue and builds it. Once the package is built, it sends the\n package to the built_queue. The main thread listens to the built_queue and\n checks if any of the dependents are ready to be built. If so, it add the\n package to the build queue.\n \"\"\"\n\n # Insert packages into build_queue. We *must* do this after counting\n # dependents, because the ordering ought not to change after insertion.\n build_queue: PriorityQueue = PriorityQueue()\n for pkg in pkg_map.values():\n if len(pkg.dependencies) == 0:\n build_queue.put(pkg)\n\n built_queue: Queue = Queue()\n\n def builder(n):\n print(f\"Starting thread {n}\")\n while True:\n pkg = build_queue.get()\n print(f\"Thread {n} building {pkg.name}\")\n try:\n pkg.build(outputdir, args)\n except Exception as e:\n built_queue.put(e)\n return\n\n print(f\"Thread {n} built {pkg.name}\")\n built_queue.put(pkg)\n # Release the GIL so new packages get queued\n sleep(0.01)\n\n for n in range(0, args.n_jobs):\n Thread(target=builder, args=(n + 1,), daemon=True).start()\n\n num_built = 0\n while num_built < len(pkg_map):\n pkg = built_queue.get()\n if isinstance(pkg, Exception):\n raise pkg\n\n num_built += 1\n\n for _dependent in pkg.dependents:\n dependent = pkg_map[_dependent]\n dependent.unbuilt_dependencies.remove(pkg.name)\n if len(dependent.unbuilt_dependencies) == 0:\n build_queue.put(dependent)\n\n\ndef build_packages(packages_dir: Path, outputdir: Path, args) -> None:\n pkg_map = generate_dependency_graph(packages_dir, args.only)\n\n build_from_graph(pkg_map, outputdir, args)\n\n # Build package.json data. 
The \"test\" package is built in a different way,\n # so we hardcode its existence here.\n #\n # This is done last so the Makefile can use it as a completion token.\n package_data: dict = {\n \"dependencies\": {\"test\": []},\n \"import_name_to_package_name\": {},\n }\n\n for name, pkg in pkg_map.items():\n if pkg.library:\n continue\n\n package_data[\"dependencies\"][name] = pkg.dependencies\n for imp in pkg.meta.get(\"test\", {}).get(\"imports\", [name]):\n package_data[\"import_name_to_package_name\"][imp] = name\n\n with open(outputdir / \"packages.json\", \"w\") as fd:\n json.dump(package_data, fd)\n\n\ndef make_parser(parser):\n parser.description = (\n \"Build all of the packages in a given directory\\n\\n\"\n \"Unless the --only option is provided\"\n )\n parser.add_argument(\n \"dir\",\n type=str,\n nargs=1,\n help=\"Input directory containing a tree of package definitions\",\n )\n parser.add_argument(\n \"output\",\n type=str,\n nargs=1,\n help=\"Output directory in which to put all built packages\",\n )\n parser.add_argument(\n \"--package_abi\",\n type=int,\n required=True,\n help=\"The ABI number for the packages to be built\",\n )\n parser.add_argument(\n \"--cflags\",\n type=str,\n nargs=\"?\",\n default=common.DEFAULTCFLAGS,\n help=\"Extra compiling flags\",\n )\n parser.add_argument(\n \"--ldflags\",\n type=str,\n nargs=\"?\",\n default=common.DEFAULTLDFLAGS,\n help=\"Extra linking flags\",\n )\n parser.add_argument(\n \"--target\",\n type=str,\n nargs=\"?\",\n default=common.TARGETPYTHON,\n help=\"The path to the target Python installation\",\n )\n parser.add_argument(\n \"--install-dir\",\n type=str,\n nargs=\"?\",\n default=\"\",\n help=(\n \"Directory for installing built host packages. Defaults to setup.py \"\n \"default. Set to 'skip' to skip installation. Installation is \"\n \"needed if you want to build other packages that depend on this one.\"\n ),\n )\n parser.add_argument(\n \"--only\",\n type=str,\n nargs=\"?\",\n default=None,\n help=(\n \"Only build the specified packages, provided as a comma \" \"separated list\"\n ),\n )\n parser.add_argument(\n \"--n-jobs\",\n type=int,\n nargs=\"?\",\n default=4,\n help=\"Number of packages to build in parallel\",\n )\n return parser\n\n\ndef main(args):\n packages_dir = Path(args.dir[0]).resolve()\n outputdir = Path(args.output[0]).resolve()\n build_packages(packages_dir, outputdir, args)\n\n\nif __name__ == \"__main__\":\n parser = make_parser(argparse.ArgumentParser())\n args = parser.parse_args()\n main(args)\n", "path": "pyodide_build/buildall.py"}]} | 3,656 | 179 |
gh_patches_debug_31259 | rasdani/github-patches | git_diff | talonhub__community-243 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
feature request: `phrase hello slash world` -> `hello/world`
I'm not sure what the best way to go about this is; it might be simple.
Some more examples/unit tests:
- `phrase hello slash world` -> `hello/world` (currently `hello /world`)
- `phrase hello slash snake name of file slash world` -> `hello/name_of_file/world` (currently `hello/name_of_file_/world`)
Similarly: `phrase file dot extension` -> `file.extension`, though this one might be complicated by the fact that I have a formatter called `dot` defined so that `dot hello world` -> `.hello_world` because I use python so much.
</issue>
<code>
[start of code/formatters.py]
1 from talon import Module, Context, actions, ui, imgui
2 from talon.grammar import Phrase
3 from typing import List, Union
4 import re
5
6 ctx = Context()
7 key = actions.key
8 edit = actions.edit
9
10 words_to_keep_lowercase = "a,an,the,at,by,for,in,is,of,on,to,up,and,as,but,or,nor".split(
11 ","
12 )
13
14 # last_phrase has the last phrase spoken, WITHOUT formatting.
15 # This is needed for reformatting.
16 last_phrase = ""
17
18 # formatted_phrase_history keeps the most recent formatted phrases, WITH formatting.
19 formatted_phrase_history = []
20 formatted_phrase_history_length = 20
21
22
23 def surround(by):
24 def func(i, word, last):
25 if i == 0:
26 word = by + word
27 if last:
28 word += by
29 return word
30
31 return func
32
33
34 def format_phrase(m: Union[str, Phrase], fmtrs: str):
35 global last_phrase
36 last_phrase = m
37 words = []
38 if isinstance(m, str):
39 words = m.split(" ")
40 else:
41 if m.words[-1] == "over":
42 m.words = m.words[:-1]
43
44 words = actions.dictate.parse_words(m)
45 words = actions.dictate.replace_words(words)
46
47 result = format_phrase_no_history(words, fmtrs)
48
49 # Add result to history.
50 global formatted_phrase_history
51 formatted_phrase_history.insert(0, result)
52 formatted_phrase_history = formatted_phrase_history[
53 :formatted_phrase_history_length
54 ]
55
56 return result
57
58
59 def format_phrase_no_history(word_list, fmtrs: str):
60 fmtr_list = fmtrs.split(",")
61 words = []
62 spaces = True
63 for i, w in enumerate(word_list):
64 for name in reversed(fmtr_list):
65 smash, func = all_formatters[name]
66 w = func(i, w, i == len(word_list) - 1)
67 spaces = spaces and not smash
68 words.append(w)
69 sep = " " if spaces else ""
70 return sep.join(words)
71
72
73 NOSEP = True
74 SEP = False
75
76
77 def words_with_joiner(joiner):
78 """Pass through words unchanged, but add a separator between them."""
79
80 def formatter_function(i, word, _):
81 return word if i == 0 else joiner + word
82
83 return (NOSEP, formatter_function)
84
85
86 def first_vs_rest(first_func, rest_func=lambda w: w):
87 """Supply one or two transformer functions for the first and rest of
88 words respectively.
89
90 Leave second argument out if you want all but the first word to be passed
91 through unchanged.
92 Set first argument to None if you want the first word to be passed
93 through unchanged."""
94 if first_func is None:
95 first_func = lambda w: w
96
97 def formatter_function(i, word, _):
98 return first_func(word) if i == 0 else rest_func(word)
99
100 return formatter_function
101
102
103 def every_word(word_func):
104 """Apply one function to every word."""
105
106 def formatter_function(i, word, _):
107 return word_func(word)
108
109 return formatter_function
110
111
112 formatters_dict = {
113 "NOOP": (SEP, lambda i, word, _: word),
114 "DOUBLE_UNDERSCORE": (NOSEP, first_vs_rest(lambda w: "__%s__" % w)),
115 "PRIVATE_CAMEL_CASE": (NOSEP, first_vs_rest(lambda w: w, lambda w: w.capitalize())),
116 "PROTECTED_CAMEL_CASE": (
117 NOSEP,
118 first_vs_rest(lambda w: w, lambda w: w.capitalize()),
119 ),
120 "PUBLIC_CAMEL_CASE": (NOSEP, every_word(lambda w: w.capitalize())),
121 "SNAKE_CASE": (
122 NOSEP,
123 first_vs_rest(lambda w: w.lower(), lambda w: "_" + w.lower()),
124 ),
125 "NO_SPACES": (NOSEP, every_word(lambda w: w)),
126 "DASH_SEPARATED": words_with_joiner("-"),
127 "TERMINAL_DASH_SEPARATED": (
128 NOSEP,
129 first_vs_rest(lambda w: " --" + w.lower(), lambda w: "-" + w.lower()),
130 ),
131 "DOUBLE_COLON_SEPARATED": words_with_joiner("::"),
132 "ALL_CAPS": (SEP, every_word(lambda w: w.upper())),
133 "ALL_LOWERCASE": (SEP, every_word(lambda w: w.lower())),
134 "DOUBLE_QUOTED_STRING": (SEP, surround('"')),
135 "SINGLE_QUOTED_STRING": (SEP, surround("'")),
136 "SPACE_SURROUNDED_STRING": (SEP, surround(" ")),
137 "DOT_SEPARATED": words_with_joiner("."),
138 "DOT_SNAKE": (NOSEP, lambda i, word, _: "." + word if i == 0 else "_" + word),
139 "SLASH_SEPARATED": (NOSEP, every_word(lambda w: "/" + w)),
140 "CAPITALIZE_FIRST_WORD": (SEP, first_vs_rest(lambda w: w.capitalize())),
141 "CAPITALIZE_ALL_WORDS": (
142 SEP,
143 lambda i, word, _: word.capitalize()
144 if i == 0 or word not in words_to_keep_lowercase
145 else word,
146 ),
147 "FIRST_THREE": (NOSEP, lambda i, word, _: word[0:3]),
148 "FIRST_FOUR": (NOSEP, lambda i, word, _: word[0:4]),
149 "FIRST_FIVE": (NOSEP, lambda i, word, _: word[0:5]),
150 }
151
152 # This is the mapping from spoken phrases to formatters
153 formatters_words = {
154 "allcaps": formatters_dict["ALL_CAPS"],
155 "alldown": formatters_dict["ALL_LOWERCASE"],
156 "camel": formatters_dict["PRIVATE_CAMEL_CASE"],
157 "dotted": formatters_dict["DOT_SEPARATED"],
158 "dubstring": formatters_dict["DOUBLE_QUOTED_STRING"],
159 "dunder": formatters_dict["DOUBLE_UNDERSCORE"],
160 "hammer": formatters_dict["PUBLIC_CAMEL_CASE"],
161 "kebab": formatters_dict["DASH_SEPARATED"],
162 "packed": formatters_dict["DOUBLE_COLON_SEPARATED"],
163 "padded": formatters_dict["SPACE_SURROUNDED_STRING"],
164 # "say": formatters_dict["NOOP"],
165 "sentence": formatters_dict["CAPITALIZE_FIRST_WORD"],
166 "slasher": formatters_dict["SLASH_SEPARATED"],
167 "smash": formatters_dict["NO_SPACES"],
168 "snake": formatters_dict["SNAKE_CASE"],
169 # "speak": formatters_dict["NOOP"],
170 "string": formatters_dict["SINGLE_QUOTED_STRING"],
171 "title": formatters_dict["CAPITALIZE_ALL_WORDS"],
172 # disable a few formatters for now
173 # "tree": formatters_dict["FIRST_THREE"],
174 # "quad": formatters_dict["FIRST_FOUR"],
175 # "fiver": formatters_dict["FIRST_FIVE"],
176 }
177
178 all_formatters = {}
179 all_formatters.update(formatters_dict)
180 all_formatters.update(formatters_words)
181
182 mod = Module()
183 mod.list("formatters", desc="list of formatters")
184
185
186 @mod.capture
187 def formatters(m) -> str:
188 "Returns a comma-separated string of formatters e.g. 'SNAKE,DUBSTRING'"
189
190
191 @mod.capture
192 def format_text(m) -> str:
193 "Formats the text and returns a string"
194
195
196 @mod.action_class
197 class Actions:
198 def formatted_text(phrase: Union[str, Phrase], formatters: str) -> str:
199 """Formats a phrase according to formatters. formatters is a comma-separated string of formatters (e.g. 'CAPITALIZE_ALL_WORDS,DOUBLE_QUOTED_STRING')"""
200 return format_phrase(phrase, formatters)
201
202 def formatters_help_toggle():
203 """Lists all formatters"""
204 if gui.showing:
205 gui.hide()
206 else:
207 gui.show()
208
209 def formatters_recent_toggle():
210 """Toggles list of recent formatters"""
211 if recent_gui.showing:
212 recent_gui.hide()
213 else:
214 recent_gui.show()
215
216 def formatters_recent_select(number: int):
217 """Inserts a recent formatter"""
218 if len(formatted_phrase_history) >= number:
219 return formatted_phrase_history[number - 1]
220 return ""
221
222 def formatters_clear_last():
223 """Clears the last formatted phrase"""
224 if len(formatted_phrase_history) > 0:
225 for character in formatted_phrase_history[0]:
226 actions.edit.delete()
227
228 def formatters_reformat_last(formatters: str) -> str:
229 """Reformats last formatted phrase"""
230 global last_phrase
231 return format_phrase(last_phrase, formatters)
232
233 def formatters_reformat_selection(formatters: str) -> str:
234 """Reformats the current selection."""
235 selected = edit.selected_text()
236 unformatted = re.sub(r"[^a-zA-Z0-9]+", " ", selected).lower()
237 # TODO: Separate out camelcase & studleycase vars
238
239 # Delete separately for compatibility with programs that don't overwrite
240 # selected text (e.g. Emacs)
241 edit.delete()
242 text = actions.self.formatted_text(unformatted, formatters)
243 actions.insert(text)
244 return text
245
246
247 @ctx.capture(rule="{self.formatters}+")
248 def formatters(m):
249 return ",".join(m.formatters_list)
250
251
252 @ctx.capture(rule="<self.formatters> <user.text>")
253 def format_text(m):
254 return format_phrase(m.text, m.formatters)
255
256
257 ctx.lists["self.formatters"] = formatters_words.keys()
258
259
260 @imgui.open(software=False)
261 def gui(gui: imgui.GUI):
262 gui.text("List formatters")
263 gui.line()
264 for name in sorted(set(formatters_words.keys())):
265 gui.text(f"{name} | {format_phrase_no_history(['one', 'two', 'three'], name)}")
266
267
268 @imgui.open(software=False)
269 def recent_gui(gui: imgui.GUI):
270 gui.text("Recent formatters")
271 gui.line()
272 for index, result in enumerate(formatted_phrase_history, 1):
273 gui.text("{}. {}".format(index, result))
274
[end of code/formatters.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/code/formatters.py b/code/formatters.py
--- a/code/formatters.py
+++ b/code/formatters.py
@@ -193,6 +193,22 @@
"Formats the text and returns a string"
+class ImmuneString(object):
+ """Wrapper that makes a string immune from formatting."""
+
+ def __init__(self, string):
+ self.string = string
+
+
[email protected]
+def formatter_immune(m) -> ImmuneString:
+ """Text that can be interspersed into a formatter, e.g. characters.
+
+ It will be inserted directly, without being formatted.
+
+ """
+
+
@mod.action_class
class Actions:
def formatted_text(phrase: Union[str, Phrase], formatters: str) -> str:
@@ -243,15 +259,45 @@
actions.insert(text)
return text
+ def insert_many(strings: List[str]) -> None:
+ """Insert a list of strings, sequentially."""
+ for string in strings:
+ actions.insert(string)
+
@ctx.capture(rule="{self.formatters}+")
def formatters(m):
return ",".join(m.formatters_list)
[email protected](rule="<self.formatters> <user.text>")
[email protected](
+ # Add anything else into this that you want to be able to speak during a
+ # formatter.
+ rule="(<user.symbol_key> | <user.letter> | numb <number>)"
+)
+def formatter_immune(m) -> ImmuneString:
+ if hasattr(m, "number"):
+ value = m.number
+ else:
+ value = m[0]
+ return ImmuneString(str(value))
+
+
[email protected](
+ # Note that if the user speaks something like "snake dot", it will
+ # insert "dot" - otherwise, they wouldn't be able to insert punctuation
+ # words directly.
+ rule="<self.formatters> <user.text> (<user.text> | <user.formatter_immune>)*"
+)
def format_text(m):
- return format_phrase(m.text, m.formatters)
+ out = ""
+ formatters = m[0]
+ for chunk in m[1:]:
+ if isinstance(chunk, ImmuneString):
+ out += chunk.string
+ else:
+ out += format_phrase(chunk, formatters)
+ return out
ctx.lists["self.formatters"] = formatters_words.keys()
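A worked example of what the widened capture rule buys (hedged: the exact spoken-to-chunk split depends on the user's Talon grammar). A phrase such as "snake hello slash world" should now arrive as alternating text and `ImmuneString` chunks, which the new `format_text` concatenates:

```python
# Sketch only; assumes ImmuneString and format_phrase from the patched
# code/formatters.py are in scope, and that "slash" is captured as a symbol key.
chunks = ["hello", ImmuneString("/"), "world"]

out = ""
for chunk in chunks:
    if isinstance(chunk, ImmuneString):
        out += chunk.string                      # inserted verbatim, never formatted
    else:
        out += format_phrase(chunk, "SNAKE_CASE")
assert out == "hello/world"
```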
| {"golden_diff": "diff --git a/code/formatters.py b/code/formatters.py\n--- a/code/formatters.py\n+++ b/code/formatters.py\n@@ -193,6 +193,22 @@\n \"Formats the text and returns a string\"\n \n \n+class ImmuneString(object):\n+ \"\"\"Wrapper that makes a string immune from formatting.\"\"\"\n+\n+ def __init__(self, string):\n+ self.string = string\n+\n+\[email protected]\n+def formatter_immune(m) -> ImmuneString:\n+ \"\"\"Text that can be interspersed into a formatter, e.g. characters.\n+\n+ It will be inserted directly, without being formatted.\n+\n+ \"\"\"\n+\n+\n @mod.action_class\n class Actions:\n def formatted_text(phrase: Union[str, Phrase], formatters: str) -> str:\n@@ -243,15 +259,45 @@\n actions.insert(text)\n return text\n \n+ def insert_many(strings: List[str]) -> None:\n+ \"\"\"Insert a list of strings, sequentially.\"\"\"\n+ for string in strings:\n+ actions.insert(string)\n+\n \n @ctx.capture(rule=\"{self.formatters}+\")\n def formatters(m):\n return \",\".join(m.formatters_list)\n \n \[email protected](rule=\"<self.formatters> <user.text>\")\[email protected](\n+ # Add anything else into this that you want to be able to speak during a\n+ # formatter.\n+ rule=\"(<user.symbol_key> | <user.letter> | numb <number>)\"\n+)\n+def formatter_immune(m) -> ImmuneString:\n+ if hasattr(m, \"number\"):\n+ value = m.number\n+ else:\n+ value = m[0]\n+ return ImmuneString(str(value))\n+\n+\[email protected](\n+ # Note that if the user speaks something like \"snake dot\", it will\n+ # insert \"dot\" - otherwise, they wouldn't be able to insert punctuation\n+ # words directly.\n+ rule=\"<self.formatters> <user.text> (<user.text> | <user.formatter_immune>)*\"\n+)\n def format_text(m):\n- return format_phrase(m.text, m.formatters)\n+ out = \"\"\n+ formatters = m[0]\n+ for chunk in m[1:]:\n+ if isinstance(chunk, ImmuneString):\n+ out += chunk.string\n+ else:\n+ out += format_phrase(chunk, formatters)\n+ return out\n \n \n ctx.lists[\"self.formatters\"] = formatters_words.keys()\n", "issue": "feature request: `phrase hello slash world` -> `hello/world`\nI'm not sure what the best way to go about this is, it might be simple.\r\n\r\nsome more examples/unit tests:\r\n\r\n- `phrase hello slash world` -> `hello/world` (currently `hello /world`)\r\n- `phrase hello slash snake name of file slash world` -> `hello/name_of_file/world` (currently `hello/name_of_file_/world`)\r\n\r\nSimilarly: `phrase file dot extension` -> `file.extension`, though this one might be complicated by the fact that I have a formatter called `dot` defined so that `dot hello world` -> `.hello_world` because I use python so much.\n", "before_files": [{"content": "from talon import Module, Context, actions, ui, imgui\nfrom talon.grammar import Phrase\nfrom typing import List, Union\nimport re\n\nctx = Context()\nkey = actions.key\nedit = actions.edit\n\nwords_to_keep_lowercase = \"a,an,the,at,by,for,in,is,of,on,to,up,and,as,but,or,nor\".split(\n \",\"\n)\n\n# last_phrase has the last phrase spoken, WITHOUT formatting.\n# This is needed for reformatting.\nlast_phrase = \"\"\n\n# formatted_phrase_history keeps the most recent formatted phrases, WITH formatting.\nformatted_phrase_history = []\nformatted_phrase_history_length = 20\n\n\ndef surround(by):\n def func(i, word, last):\n if i == 0:\n word = by + word\n if last:\n word += by\n return word\n\n return func\n\n\ndef format_phrase(m: Union[str, Phrase], fmtrs: str):\n global last_phrase\n last_phrase = m\n words = []\n if isinstance(m, str):\n words = m.split(\" \")\n else:\n if 
m.words[-1] == \"over\":\n m.words = m.words[:-1]\n\n words = actions.dictate.parse_words(m)\n words = actions.dictate.replace_words(words)\n\n result = format_phrase_no_history(words, fmtrs)\n\n # Add result to history.\n global formatted_phrase_history\n formatted_phrase_history.insert(0, result)\n formatted_phrase_history = formatted_phrase_history[\n :formatted_phrase_history_length\n ]\n\n return result\n\n\ndef format_phrase_no_history(word_list, fmtrs: str):\n fmtr_list = fmtrs.split(\",\")\n words = []\n spaces = True\n for i, w in enumerate(word_list):\n for name in reversed(fmtr_list):\n smash, func = all_formatters[name]\n w = func(i, w, i == len(word_list) - 1)\n spaces = spaces and not smash\n words.append(w)\n sep = \" \" if spaces else \"\"\n return sep.join(words)\n\n\nNOSEP = True\nSEP = False\n\n\ndef words_with_joiner(joiner):\n \"\"\"Pass through words unchanged, but add a separator between them.\"\"\"\n\n def formatter_function(i, word, _):\n return word if i == 0 else joiner + word\n\n return (NOSEP, formatter_function)\n\n\ndef first_vs_rest(first_func, rest_func=lambda w: w):\n \"\"\"Supply one or two transformer functions for the first and rest of\n words respectively.\n\n Leave second argument out if you want all but the first word to be passed\n through unchanged.\n Set first argument to None if you want the first word to be passed\n through unchanged.\"\"\"\n if first_func is None:\n first_func = lambda w: w\n\n def formatter_function(i, word, _):\n return first_func(word) if i == 0 else rest_func(word)\n\n return formatter_function\n\n\ndef every_word(word_func):\n \"\"\"Apply one function to every word.\"\"\"\n\n def formatter_function(i, word, _):\n return word_func(word)\n\n return formatter_function\n\n\nformatters_dict = {\n \"NOOP\": (SEP, lambda i, word, _: word),\n \"DOUBLE_UNDERSCORE\": (NOSEP, first_vs_rest(lambda w: \"__%s__\" % w)),\n \"PRIVATE_CAMEL_CASE\": (NOSEP, first_vs_rest(lambda w: w, lambda w: w.capitalize())),\n \"PROTECTED_CAMEL_CASE\": (\n NOSEP,\n first_vs_rest(lambda w: w, lambda w: w.capitalize()),\n ),\n \"PUBLIC_CAMEL_CASE\": (NOSEP, every_word(lambda w: w.capitalize())),\n \"SNAKE_CASE\": (\n NOSEP,\n first_vs_rest(lambda w: w.lower(), lambda w: \"_\" + w.lower()),\n ),\n \"NO_SPACES\": (NOSEP, every_word(lambda w: w)),\n \"DASH_SEPARATED\": words_with_joiner(\"-\"),\n \"TERMINAL_DASH_SEPARATED\": (\n NOSEP,\n first_vs_rest(lambda w: \" --\" + w.lower(), lambda w: \"-\" + w.lower()),\n ),\n \"DOUBLE_COLON_SEPARATED\": words_with_joiner(\"::\"),\n \"ALL_CAPS\": (SEP, every_word(lambda w: w.upper())),\n \"ALL_LOWERCASE\": (SEP, every_word(lambda w: w.lower())),\n \"DOUBLE_QUOTED_STRING\": (SEP, surround('\"')),\n \"SINGLE_QUOTED_STRING\": (SEP, surround(\"'\")),\n \"SPACE_SURROUNDED_STRING\": (SEP, surround(\" \")),\n \"DOT_SEPARATED\": words_with_joiner(\".\"),\n \"DOT_SNAKE\": (NOSEP, lambda i, word, _: \".\" + word if i == 0 else \"_\" + word),\n \"SLASH_SEPARATED\": (NOSEP, every_word(lambda w: \"/\" + w)),\n \"CAPITALIZE_FIRST_WORD\": (SEP, first_vs_rest(lambda w: w.capitalize())),\n \"CAPITALIZE_ALL_WORDS\": (\n SEP,\n lambda i, word, _: word.capitalize()\n if i == 0 or word not in words_to_keep_lowercase\n else word,\n ),\n \"FIRST_THREE\": (NOSEP, lambda i, word, _: word[0:3]),\n \"FIRST_FOUR\": (NOSEP, lambda i, word, _: word[0:4]),\n \"FIRST_FIVE\": (NOSEP, lambda i, word, _: word[0:5]),\n}\n\n# This is the mapping from spoken phrases to formatters\nformatters_words = {\n \"allcaps\": formatters_dict[\"ALL_CAPS\"],\n 
\"alldown\": formatters_dict[\"ALL_LOWERCASE\"],\n \"camel\": formatters_dict[\"PRIVATE_CAMEL_CASE\"],\n \"dotted\": formatters_dict[\"DOT_SEPARATED\"],\n \"dubstring\": formatters_dict[\"DOUBLE_QUOTED_STRING\"],\n \"dunder\": formatters_dict[\"DOUBLE_UNDERSCORE\"],\n \"hammer\": formatters_dict[\"PUBLIC_CAMEL_CASE\"],\n \"kebab\": formatters_dict[\"DASH_SEPARATED\"],\n \"packed\": formatters_dict[\"DOUBLE_COLON_SEPARATED\"],\n \"padded\": formatters_dict[\"SPACE_SURROUNDED_STRING\"],\n # \"say\": formatters_dict[\"NOOP\"],\n \"sentence\": formatters_dict[\"CAPITALIZE_FIRST_WORD\"],\n \"slasher\": formatters_dict[\"SLASH_SEPARATED\"],\n \"smash\": formatters_dict[\"NO_SPACES\"],\n \"snake\": formatters_dict[\"SNAKE_CASE\"],\n # \"speak\": formatters_dict[\"NOOP\"],\n \"string\": formatters_dict[\"SINGLE_QUOTED_STRING\"],\n \"title\": formatters_dict[\"CAPITALIZE_ALL_WORDS\"],\n # disable a few formatters for now\n # \"tree\": formatters_dict[\"FIRST_THREE\"],\n # \"quad\": formatters_dict[\"FIRST_FOUR\"],\n # \"fiver\": formatters_dict[\"FIRST_FIVE\"],\n}\n\nall_formatters = {}\nall_formatters.update(formatters_dict)\nall_formatters.update(formatters_words)\n\nmod = Module()\nmod.list(\"formatters\", desc=\"list of formatters\")\n\n\[email protected]\ndef formatters(m) -> str:\n \"Returns a comma-separated string of formatters e.g. 'SNAKE,DUBSTRING'\"\n\n\[email protected]\ndef format_text(m) -> str:\n \"Formats the text and returns a string\"\n\n\[email protected]_class\nclass Actions:\n def formatted_text(phrase: Union[str, Phrase], formatters: str) -> str:\n \"\"\"Formats a phrase according to formatters. formatters is a comma-separated string of formatters (e.g. 'CAPITALIZE_ALL_WORDS,DOUBLE_QUOTED_STRING')\"\"\"\n return format_phrase(phrase, formatters)\n\n def formatters_help_toggle():\n \"\"\"Lists all formatters\"\"\"\n if gui.showing:\n gui.hide()\n else:\n gui.show()\n\n def formatters_recent_toggle():\n \"\"\"Toggles list of recent formatters\"\"\"\n if recent_gui.showing:\n recent_gui.hide()\n else:\n recent_gui.show()\n\n def formatters_recent_select(number: int):\n \"\"\"Inserts a recent formatter\"\"\"\n if len(formatted_phrase_history) >= number:\n return formatted_phrase_history[number - 1]\n return \"\"\n\n def formatters_clear_last():\n \"\"\"Clears the last formatted phrase\"\"\"\n if len(formatted_phrase_history) > 0:\n for character in formatted_phrase_history[0]:\n actions.edit.delete()\n\n def formatters_reformat_last(formatters: str) -> str:\n \"\"\"Reformats last formatted phrase\"\"\"\n global last_phrase\n return format_phrase(last_phrase, formatters)\n\n def formatters_reformat_selection(formatters: str) -> str:\n \"\"\"Reformats the current selection.\"\"\"\n selected = edit.selected_text()\n unformatted = re.sub(r\"[^a-zA-Z0-9]+\", \" \", selected).lower()\n # TODO: Separate out camelcase & studleycase vars\n\n # Delete separately for compatibility with programs that don't overwrite\n # selected text (e.g. 
Emacs)\n edit.delete()\n text = actions.self.formatted_text(unformatted, formatters)\n actions.insert(text)\n return text\n\n\[email protected](rule=\"{self.formatters}+\")\ndef formatters(m):\n return \",\".join(m.formatters_list)\n\n\[email protected](rule=\"<self.formatters> <user.text>\")\ndef format_text(m):\n return format_phrase(m.text, m.formatters)\n\n\nctx.lists[\"self.formatters\"] = formatters_words.keys()\n\n\[email protected](software=False)\ndef gui(gui: imgui.GUI):\n gui.text(\"List formatters\")\n gui.line()\n for name in sorted(set(formatters_words.keys())):\n gui.text(f\"{name} | {format_phrase_no_history(['one', 'two', 'three'], name)}\")\n\n\[email protected](software=False)\ndef recent_gui(gui: imgui.GUI):\n gui.text(\"Recent formatters\")\n gui.line()\n for index, result in enumerate(formatted_phrase_history, 1):\n gui.text(\"{}. {}\".format(index, result))\n", "path": "code/formatters.py"}]} | 3,601 | 551 |
gh_patches_debug_22889 | rasdani/github-patches | git_diff | ARM-DOE__ACT-500 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
BUG: Fix QC bug in tests
CI is now failing due to some bug on Ubuntu systems:
```bash
def test_qc_flag_description():
    """
    This will check if the cleanup() method will correctly convert convert
    flag_#_description to CF flag_masks and flag_meanings.

    """

    ds = read_netcdf(EXAMPLE_CO2FLX4M)
    ds.clean.cleanup()
    qc_var_name = ds.qcfilter.check_for_ancillary_qc(
        'momentum_flux', add_if_missing=False, cleanup=False
    )

    assert isinstance(ds[qc_var_name].attrs['flag_masks'], list)
    assert isinstance(ds[qc_var_name].attrs['flag_meanings'], list)
    assert isinstance(ds[qc_var_name].attrs['flag_assessments'], list)
    assert ds[qc_var_name].attrs['standard_name'] == 'quality_flag'

    assert len(ds[qc_var_name].attrs['flag_masks']) == 9
    unique_flag_assessments = list({'Acceptable', 'Indeterminate', 'Bad'})
>   assert list(set(ds[qc_var_name].attrs['flag_assessments'])) == unique_flag_assessments
E   AssertionError: assert ['Indetermina...table', 'Bad'] == ['Indetermina... 'Acceptable']
E   At index 1 diff: 'Acceptable' != 'Bad'
E   Full diff:
E   - ['Indeterminate', 'Bad', 'Acceptable']
E   ?                   -------
E   + ['Indeterminate', 'Acceptable', 'Bad']
E   ?                    +++++++

act/tests/test_qc.py:814: AssertionError
```
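The failure above is an ordering problem rather than a data problem: both lists contain the same three assessments, but Python sets have no guaranteed iteration order, so `list(set(...))` can come out in a different order on different platforms. A minimal sketch of an order-insensitive assertion (illustrative only, not necessarily the fix the project chose):

```python
# Continues the test above: ds and qc_var_name come from the failing test.
flag_assessments = ds[qc_var_name].attrs['flag_assessments']

# Compare as sets (or sorted lists) so the check does not depend on
# the arbitrary iteration order of a Python set.
assert set(flag_assessments) == {'Acceptable', 'Indeterminate', 'Bad'}
assert sorted(set(flag_assessments)) == sorted(['Acceptable', 'Indeterminate', 'Bad'])
```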
</issue>
<code>
[start of act/discovery/get_armfiles.py]
1 """
2 Script for downloading data from ARM's Live Data Webservice
3
4 """
5
6 import argparse
7 import json
8 import os
9 import sys
10
11 try:
12 from urllib.request import urlopen
13 except ImportError:
14 from urllib import urlopen
15
16 from act.utils import date_parser
17
18
19 def download_data(username, token, datastream, startdate, enddate, time=None, output=None):
20 """
21 This tool will help users utilize the ARM Live Data Webservice to download
22 ARM data.
23
24 Parameters
25 ----------
26 username : str
27 The username to use for logging into the ADC archive.
28 token : str
29 The access token for accessing the ADC archive.
30 datastream : str
31 The name of the datastream to acquire.
32 startdate : str
33 The start date of the data to acquire. Formats accepted are
34 YYYY-MM-DD, DD.MM.YYYY, DD/MM/YYYY, YYYYMMDD, YYYY/MM/DD or
35 any of the previous formats with THH:MM:SS added onto the end
36 (ex. 2020-09-15T12:00:00).
37 enddate : str
38 The end date of the data to acquire. Formats accepted are
39 YYYY-MM-DD, DD.MM.YYYY, DD/MM/YYYY, YYYYMMDD or YYYY/MM/DD, or
40 any of the previous formats with THH:MM:SS added onto the end
41 (ex. 2020-09-15T13:00:00).
42 time: str or None
43 The specific time. Format is HHMMSS. Set to None to download all files
44 in the given date interval.
45 output : str
46 The output directory for the data. Set to None to make a folder in the
47 current working directory with the same name as *datastream* to place
48 the files in.
49
50 Returns
51 -------
52 files : list
53 Returns list of files retrieved
54
55 Notes
56 -----
57 This programmatic interface allows users to query and automate
58 machine-to-machine downloads of ARM data. This tool uses a REST URL and
59 specific parameters (saveData, query), user ID and access token, a
60 datastream name, a start date, and an end date, and data files matching
61 the criteria will be returned to the user and downloaded.
62
63 By using this web service, users can setup cron jobs and automatically
64 download data from /data/archive into their workspace. This will also
65 eliminate the manual step of following a link in an email to download data.
66 All other data files, which are not on the spinning
67 disk (on HPSS), will have to go through the regular ordering process.
68 More information about this REST API and tools can be found on `ARM Live
69 <https://adc.arm.gov/armlive/#scripts>`_.
70
71 To login/register for an access token click `here
72 <https://adc.arm.gov/armlive/livedata/home>`_.
73
74 Author: Michael Giansiracusa
75 Email: [email protected]
76
77 Web Tools Contact: Ranjeet Devarakonda [email protected]
78
79 Examples
80 --------
81 This code will download the netCDF files from the sgpmetE13.b1 datastream
82 and place them in a directory named sgpmetE13.b1. The data from 14 Jan to
83 20 Jan 2017 will be downloaded. Replace *userName* and *XXXXXXXXXXXXXXXX*
84 with your username and token for ARM Data Discovery. See the Notes for
85 information on how to obtain a username and token.
86
87 .. code-block:: python
88
89 act.discovery.download_data(
90 "userName", "XXXXXXXXXXXXXXXX", "sgpmetE13.b1", "2017-01-14", "2017-01-20"
91 )
92
93 """
94 # default start and end are empty
95 start, end = '', ''
96 # start and end strings for query_url are constructed
97 # if the arguments were provided
98 if startdate:
99 start = date_parser(startdate, output_format='%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
100 start = f'&start={start}'
101 if enddate:
102 end = date_parser(enddate, output_format='%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
103 end = f'&end={end}'
104 # build the url to query the web service using the arguments provided
105 query_url = (
106 'https://adc.arm.gov/armlive/livedata/query?' + 'user={0}&ds={1}{2}{3}&wt=json'
107 ).format(':'.join([username, token]), datastream, start, end)
108
109 # get url response, read the body of the message,
110 # and decode from bytes type to utf-8 string
111 response_body = urlopen(query_url).read().decode('utf-8')
112 # if the response is an html doc, then there was an error with the user
113 if response_body[1:14] == '!DOCTYPE html':
114 raise ConnectionRefusedError('Error with user. Check username or token.')
115
116 # parse into json object
117 response_body_json = json.loads(response_body)
118
119 # construct output directory
120 if output:
121 # output files to directory specified
122 output_dir = os.path.join(output)
123 else:
124 # if no folder given, add datastream folder
125 # to current working dir to prevent file mix-up
126 output_dir = os.path.join(os.getcwd(), datastream)
127
128 # not testing, response is successful and files were returned
129 if response_body_json is None:
130 print('ARM Data Live Webservice does not appear to be functioning')
131 return []
132
133 num_files = len(response_body_json['files'])
134 file_names = []
135 if response_body_json['status'] == 'success' and num_files > 0:
136 for fname in response_body_json['files']:
137 if time is not None:
138 if time not in fname:
139 continue
140 print(f'[DOWNLOADING] {fname}')
141 # construct link to web service saveData function
142 save_data_url = (
143 'https://adc.arm.gov/armlive/livedata/' + 'saveData?user={0}&file={1}'
144 ).format(':'.join([username, token]), fname)
145 output_file = os.path.join(output_dir, fname)
146 # make directory if it doesn't exist
147 if not os.path.isdir(output_dir):
148 os.makedirs(output_dir)
149 # create file and write bytes to file
150 with open(output_file, 'wb') as open_bytes_file:
151 open_bytes_file.write(urlopen(save_data_url).read())
152 file_names.append(output_file)
153 else:
154 print(
155 'No files returned or url status error.\n' 'Check datastream name, start, and end date.'
156 )
157
158 return file_names
159
[end of act/discovery/get_armfiles.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/act/discovery/get_armfiles.py b/act/discovery/get_armfiles.py
--- a/act/discovery/get_armfiles.py
+++ b/act/discovery/get_armfiles.py
@@ -7,6 +7,7 @@
import json
import os
import sys
+from datetime import timedelta
try:
from urllib.request import urlopen
@@ -96,10 +97,15 @@
# start and end strings for query_url are constructed
# if the arguments were provided
if startdate:
- start = date_parser(startdate, output_format='%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
+ start_datetime = date_parser(startdate, return_datetime=True)
+ start = start_datetime.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
start = f'&start={start}'
if enddate:
- end = date_parser(enddate, output_format='%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
+ end_datetime = date_parser(enddate, return_datetime=True)
+ # If the start and end date are the same, and a day to the end date
+ if start_datetime == end_datetime:
+ end_datetime += timedelta(days=1)
+ end = end_datetime.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
end = f'&end={end}'
# build the url to query the web service using the arguments provided
query_url = (
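One behavioural consequence of this patch, sketched with illustrative dates: when the caller passes the same start and end date, the end of the query window is pushed forward by a day so a single-day request still returns files.

```python
from datetime import datetime, timedelta

# Mirrors the patched logic with plain datetimes (date_parser itself not shown).
start_datetime = datetime(2017, 1, 14)
end_datetime = datetime(2017, 1, 14)

if start_datetime == end_datetime:
    end_datetime += timedelta(days=1)

assert end_datetime == datetime(2017, 1, 15)
# The query then spans 2017-01-14T00:00:00Z through 2017-01-15T00:00:00Z.
```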
| {"golden_diff": "diff --git a/act/discovery/get_armfiles.py b/act/discovery/get_armfiles.py\n--- a/act/discovery/get_armfiles.py\n+++ b/act/discovery/get_armfiles.py\n@@ -7,6 +7,7 @@\n import json\n import os\n import sys\n+from datetime import timedelta\n \n try:\n from urllib.request import urlopen\n@@ -96,10 +97,15 @@\n # start and end strings for query_url are constructed\n # if the arguments were provided\n if startdate:\n- start = date_parser(startdate, output_format='%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'\n+ start_datetime = date_parser(startdate, return_datetime=True)\n+ start = start_datetime.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'\n start = f'&start={start}'\n if enddate:\n- end = date_parser(enddate, output_format='%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'\n+ end_datetime = date_parser(enddate, return_datetime=True)\n+ # If the start and end date are the same, and a day to the end date\n+ if start_datetime == end_datetime:\n+ end_datetime += timedelta(days=1)\n+ end = end_datetime.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'\n end = f'&end={end}'\n # build the url to query the web service using the arguments provided\n query_url = (\n", "issue": "BUG: Fix QC bug in tests\nNow CI is failing due to some bug on ubuntu systems\r\n\r\n```bash\r\ndef test_qc_flag_description():\r\n[32](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:33)\r\n \"\"\"\r\n[33](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:34)\r\n This will check if the cleanup() method will correctly convert convert\r\n[34](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:35)\r\n flag_#_description to CF flag_masks and flag_meanings.\r\n[35](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:36)\r\n \r\n[36](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:37)\r\n \"\"\"\r\n[37](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:38)\r\n \r\n[38](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:39)\r\n ds = read_netcdf(EXAMPLE_CO2FLX4M)\r\n[39](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:40)\r\n ds.clean.cleanup()\r\n[40](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:41)\r\n qc_var_name = ds.qcfilter.check_for_ancillary_qc(\r\n[41](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:42)\r\n 'momentum_flux', add_if_missing=False, cleanup=False\r\n[42](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:43)\r\n )\r\n[43](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:44)\r\n \r\n[44](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:45)\r\n assert isinstance(ds[qc_var_name].attrs['flag_masks'], list)\r\n[45](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:46)\r\n assert isinstance(ds[qc_var_name].attrs['flag_meanings'], list)\r\n[46](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:47)\r\n assert isinstance(ds[qc_var_name].attrs['flag_assessments'], list)\r\n[47](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:48)\r\n assert ds[qc_var_name].attrs['standard_name'] == 'quality_flag'\r\n[48](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:49)\r\n \r\n[49](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:50)\r\n assert 
len(ds[qc_var_name].attrs['flag_masks']) == 9\r\n[50](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:51)\r\n unique_flag_assessments = list({'Acceptable', 'Indeterminate', 'Bad'})\r\n[51](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:52)\r\n> assert list(set(ds[qc_var_name].attrs['flag_assessments'])) == unique_flag_assessments\r\n[52](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:53)\r\nE AssertionError: assert ['Indetermina...table', 'Bad'] == ['Indetermina... 'Acceptable']\r\n[53](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:54)\r\nE At index 1 diff: 'Acceptable' != 'Bad'\r\n[54](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:55)\r\nE Full diff:\r\n[55](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:56)\r\nE - ['Indeterminate', 'Bad', 'Acceptable']\r\n[56](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:57)\r\nE ? -------\r\n[57](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:58)\r\nE + ['Indeterminate', 'Acceptable', 'Bad']\r\n[58](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:59)\r\nE ? +++++++\r\n[59](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:60)\r\n\r\n[60](https://github.com/ARM-DOE/ACT/runs/7508339706?check_suite_focus=true#step:6:61)\r\nact/tests/test_qc.py:814: AssertionError\r\n```\n", "before_files": [{"content": "\"\"\"\nScript for downloading data from ARM's Live Data Webservice\n\n\"\"\"\n\nimport argparse\nimport json\nimport os\nimport sys\n\ntry:\n from urllib.request import urlopen\nexcept ImportError:\n from urllib import urlopen\n\nfrom act.utils import date_parser\n\n\ndef download_data(username, token, datastream, startdate, enddate, time=None, output=None):\n \"\"\"\n This tool will help users utilize the ARM Live Data Webservice to download\n ARM data.\n\n Parameters\n ----------\n username : str\n The username to use for logging into the ADC archive.\n token : str\n The access token for accessing the ADC archive.\n datastream : str\n The name of the datastream to acquire.\n startdate : str\n The start date of the data to acquire. Formats accepted are\n YYYY-MM-DD, DD.MM.YYYY, DD/MM/YYYY, YYYYMMDD, YYYY/MM/DD or\n any of the previous formats with THH:MM:SS added onto the end\n (ex. 2020-09-15T12:00:00).\n enddate : str\n The end date of the data to acquire. Formats accepted are\n YYYY-MM-DD, DD.MM.YYYY, DD/MM/YYYY, YYYYMMDD or YYYY/MM/DD, or\n any of the previous formats with THH:MM:SS added onto the end\n (ex. 2020-09-15T13:00:00).\n time: str or None\n The specific time. Format is HHMMSS. Set to None to download all files\n in the given date interval.\n output : str\n The output directory for the data. Set to None to make a folder in the\n current working directory with the same name as *datastream* to place\n the files in.\n\n Returns\n -------\n files : list\n Returns list of files retrieved\n\n Notes\n -----\n This programmatic interface allows users to query and automate\n machine-to-machine downloads of ARM data. 
This tool uses a REST URL and\n specific parameters (saveData, query), user ID and access token, a\n datastream name, a start date, and an end date, and data files matching\n the criteria will be returned to the user and downloaded.\n\n By using this web service, users can setup cron jobs and automatically\n download data from /data/archive into their workspace. This will also\n eliminate the manual step of following a link in an email to download data.\n All other data files, which are not on the spinning\n disk (on HPSS), will have to go through the regular ordering process.\n More information about this REST API and tools can be found on `ARM Live\n <https://adc.arm.gov/armlive/#scripts>`_.\n\n To login/register for an access token click `here\n <https://adc.arm.gov/armlive/livedata/home>`_.\n\n Author: Michael Giansiracusa\n Email: [email protected]\n\n Web Tools Contact: Ranjeet Devarakonda [email protected]\n\n Examples\n --------\n This code will download the netCDF files from the sgpmetE13.b1 datastream\n and place them in a directory named sgpmetE13.b1. The data from 14 Jan to\n 20 Jan 2017 will be downloaded. Replace *userName* and *XXXXXXXXXXXXXXXX*\n with your username and token for ARM Data Discovery. See the Notes for\n information on how to obtain a username and token.\n\n .. code-block:: python\n\n act.discovery.download_data(\n \"userName\", \"XXXXXXXXXXXXXXXX\", \"sgpmetE13.b1\", \"2017-01-14\", \"2017-01-20\"\n )\n\n \"\"\"\n # default start and end are empty\n start, end = '', ''\n # start and end strings for query_url are constructed\n # if the arguments were provided\n if startdate:\n start = date_parser(startdate, output_format='%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'\n start = f'&start={start}'\n if enddate:\n end = date_parser(enddate, output_format='%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'\n end = f'&end={end}'\n # build the url to query the web service using the arguments provided\n query_url = (\n 'https://adc.arm.gov/armlive/livedata/query?' + 'user={0}&ds={1}{2}{3}&wt=json'\n ).format(':'.join([username, token]), datastream, start, end)\n\n # get url response, read the body of the message,\n # and decode from bytes type to utf-8 string\n response_body = urlopen(query_url).read().decode('utf-8')\n # if the response is an html doc, then there was an error with the user\n if response_body[1:14] == '!DOCTYPE html':\n raise ConnectionRefusedError('Error with user. 
Check username or token.')\n\n # parse into json object\n response_body_json = json.loads(response_body)\n\n # construct output directory\n if output:\n # output files to directory specified\n output_dir = os.path.join(output)\n else:\n # if no folder given, add datastream folder\n # to current working dir to prevent file mix-up\n output_dir = os.path.join(os.getcwd(), datastream)\n\n # not testing, response is successful and files were returned\n if response_body_json is None:\n print('ARM Data Live Webservice does not appear to be functioning')\n return []\n\n num_files = len(response_body_json['files'])\n file_names = []\n if response_body_json['status'] == 'success' and num_files > 0:\n for fname in response_body_json['files']:\n if time is not None:\n if time not in fname:\n continue\n print(f'[DOWNLOADING] {fname}')\n # construct link to web service saveData function\n save_data_url = (\n 'https://adc.arm.gov/armlive/livedata/' + 'saveData?user={0}&file={1}'\n ).format(':'.join([username, token]), fname)\n output_file = os.path.join(output_dir, fname)\n # make directory if it doesn't exist\n if not os.path.isdir(output_dir):\n os.makedirs(output_dir)\n # create file and write bytes to file\n with open(output_file, 'wb') as open_bytes_file:\n open_bytes_file.write(urlopen(save_data_url).read())\n file_names.append(output_file)\n else:\n print(\n 'No files returned or url status error.\\n' 'Check datastream name, start, and end date.'\n )\n\n return file_names\n", "path": "act/discovery/get_armfiles.py"}]} | 3,979 | 351 |
gh_patches_debug_30993 | rasdani/github-patches | git_diff | PaddlePaddle__PaddleSpeech-1609 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[vec][search] update to paddlespeech model
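As one reading of "update to paddlespeech model", a minimal sketch of replacing the random placeholder embedding in `demos/audio_searching/src/encode.py` (listed below) with a real speaker embedding from PaddleSpeech's `VectorExecutor`; the import path and call signature are assumptions based on the `paddlespeech.cli` interface, not the committed fix:

```python
import numpy as np
from paddlespeech.cli import VectorExecutor

vector_executor = VectorExecutor()


def get_audio_embedding(path):
    # Produce a speaker embedding with the PaddleSpeech vector model
    # instead of the current random placeholder, then L2-normalise it
    # before storing it in Milvus.
    embedding = vector_executor(audio_file=path)
    embedding = embedding / np.linalg.norm(embedding)
    return embedding.tolist()
```

Note that the Milvus `VECTOR_DIMENSION` in `config.py` would also have to match the model's output size rather than the placeholder's 2048.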
</issue>
<code>
[start of demos/audio_searching/src/config.py]
1 # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import os
15
16 ############### Milvus Configuration ###############
17 MILVUS_HOST = os.getenv("MILVUS_HOST", "127.0.0.1")
18 MILVUS_PORT = int(os.getenv("MILVUS_PORT", "19530"))
19 VECTOR_DIMENSION = int(os.getenv("VECTOR_DIMENSION", "2048"))
20 INDEX_FILE_SIZE = int(os.getenv("INDEX_FILE_SIZE", "1024"))
21 METRIC_TYPE = os.getenv("METRIC_TYPE", "L2")
22 DEFAULT_TABLE = os.getenv("DEFAULT_TABLE", "audio_table")
23 TOP_K = int(os.getenv("TOP_K", "10"))
24
25 ############### MySQL Configuration ###############
26 MYSQL_HOST = os.getenv("MYSQL_HOST", "127.0.0.1")
27 MYSQL_PORT = int(os.getenv("MYSQL_PORT", "3306"))
28 MYSQL_USER = os.getenv("MYSQL_USER", "root")
29 MYSQL_PWD = os.getenv("MYSQL_PWD", "123456")
30 MYSQL_DB = os.getenv("MYSQL_DB", "mysql")
31
32 ############### Data Path ###############
33 UPLOAD_PATH = os.getenv("UPLOAD_PATH", "tmp/audio-data")
34
35 ############### Number of Log Files ###############
36 LOGS_NUM = int(os.getenv("logs_num", "0"))
37
[end of demos/audio_searching/src/config.py]
[start of demos/audio_searching/src/encode.py]
1 # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import os
15
16 import librosa
17 import numpy as np
18 from logs import LOGGER
19
20
21 def get_audio_embedding(path):
22 """
23 Use vpr_inference to generate embedding of audio
24 """
25 try:
26 RESAMPLE_RATE = 16000
27 audio, _ = librosa.load(path, sr=RESAMPLE_RATE, mono=True)
28
29 # TODO add infer/python interface to get embedding, now fake it by rand
30 # vpr = ECAPATDNN(checkpoint_path=None, device='cuda')
31 # embedding = vpr.inference(audio)
32 np.random.seed(hash(os.path.basename(path)) % 1000000)
33 embedding = np.random.rand(1, 2048)
34 embedding = embedding / np.linalg.norm(embedding)
35 embedding = embedding.tolist()[0]
36 return embedding
37 except Exception as e:
38 LOGGER.error(f"Error with embedding:{e}")
39 return None
40
[end of demos/audio_searching/src/encode.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/demos/audio_searching/src/config.py b/demos/audio_searching/src/config.py
--- a/demos/audio_searching/src/config.py
+++ b/demos/audio_searching/src/config.py
@@ -16,7 +16,7 @@
############### Milvus Configuration ###############
MILVUS_HOST = os.getenv("MILVUS_HOST", "127.0.0.1")
MILVUS_PORT = int(os.getenv("MILVUS_PORT", "19530"))
-VECTOR_DIMENSION = int(os.getenv("VECTOR_DIMENSION", "2048"))
+VECTOR_DIMENSION = int(os.getenv("VECTOR_DIMENSION", "192"))
INDEX_FILE_SIZE = int(os.getenv("INDEX_FILE_SIZE", "1024"))
METRIC_TYPE = os.getenv("METRIC_TYPE", "L2")
DEFAULT_TABLE = os.getenv("DEFAULT_TABLE", "audio_table")
diff --git a/demos/audio_searching/src/encode.py b/demos/audio_searching/src/encode.py
--- a/demos/audio_searching/src/encode.py
+++ b/demos/audio_searching/src/encode.py
@@ -15,7 +15,12 @@
import librosa
import numpy as np
+from config import DEFAULT_TABLE
+
from logs import LOGGER
+from paddlespeech.cli import VectorExecutor
+
+vector_executor = VectorExecutor()
def get_audio_embedding(path):
@@ -23,16 +28,9 @@
Use vpr_inference to generate embedding of audio
"""
try:
- RESAMPLE_RATE = 16000
- audio, _ = librosa.load(path, sr=RESAMPLE_RATE, mono=True)
-
- # TODO add infer/python interface to get embedding, now fake it by rand
- # vpr = ECAPATDNN(checkpoint_path=None, device='cuda')
- # embedding = vpr.inference(audio)
- np.random.seed(hash(os.path.basename(path)) % 1000000)
- embedding = np.random.rand(1, 2048)
+ embedding = vector_executor(audio_file=path)
embedding = embedding / np.linalg.norm(embedding)
- embedding = embedding.tolist()[0]
+ embedding = embedding.tolist()
return embedding
except Exception as e:
LOGGER.error(f"Error with embedding:{e}")
| {"golden_diff": "diff --git a/demos/audio_searching/src/config.py b/demos/audio_searching/src/config.py\n--- a/demos/audio_searching/src/config.py\n+++ b/demos/audio_searching/src/config.py\n@@ -16,7 +16,7 @@\n ############### Milvus Configuration ###############\n MILVUS_HOST = os.getenv(\"MILVUS_HOST\", \"127.0.0.1\")\n MILVUS_PORT = int(os.getenv(\"MILVUS_PORT\", \"19530\"))\n-VECTOR_DIMENSION = int(os.getenv(\"VECTOR_DIMENSION\", \"2048\"))\n+VECTOR_DIMENSION = int(os.getenv(\"VECTOR_DIMENSION\", \"192\"))\n INDEX_FILE_SIZE = int(os.getenv(\"INDEX_FILE_SIZE\", \"1024\"))\n METRIC_TYPE = os.getenv(\"METRIC_TYPE\", \"L2\")\n DEFAULT_TABLE = os.getenv(\"DEFAULT_TABLE\", \"audio_table\")\ndiff --git a/demos/audio_searching/src/encode.py b/demos/audio_searching/src/encode.py\n--- a/demos/audio_searching/src/encode.py\n+++ b/demos/audio_searching/src/encode.py\n@@ -15,7 +15,12 @@\n \n import librosa\n import numpy as np\n+from config import DEFAULT_TABLE\n+\n from logs import LOGGER\n+from paddlespeech.cli import VectorExecutor\n+\n+vector_executor = VectorExecutor()\n \n \n def get_audio_embedding(path):\n@@ -23,16 +28,9 @@\n Use vpr_inference to generate embedding of audio\n \"\"\"\n try:\n- RESAMPLE_RATE = 16000\n- audio, _ = librosa.load(path, sr=RESAMPLE_RATE, mono=True)\n-\n- # TODO add infer/python interface to get embedding, now fake it by rand\n- # vpr = ECAPATDNN(checkpoint_path=None, device='cuda')\n- # embedding = vpr.inference(audio)\n- np.random.seed(hash(os.path.basename(path)) % 1000000)\n- embedding = np.random.rand(1, 2048)\n+ embedding = vector_executor(audio_file=path)\n embedding = embedding / np.linalg.norm(embedding)\n- embedding = embedding.tolist()[0]\n+ embedding = embedding.tolist()\n return embedding\n except Exception as e:\n LOGGER.error(f\"Error with embedding:{e}\")\n", "issue": "[vec][search] update to paddlespeech model\n\n", "before_files": [{"content": "# Copyright (c) 2022 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\n\n############### Milvus Configuration ###############\nMILVUS_HOST = os.getenv(\"MILVUS_HOST\", \"127.0.0.1\")\nMILVUS_PORT = int(os.getenv(\"MILVUS_PORT\", \"19530\"))\nVECTOR_DIMENSION = int(os.getenv(\"VECTOR_DIMENSION\", \"2048\"))\nINDEX_FILE_SIZE = int(os.getenv(\"INDEX_FILE_SIZE\", \"1024\"))\nMETRIC_TYPE = os.getenv(\"METRIC_TYPE\", \"L2\")\nDEFAULT_TABLE = os.getenv(\"DEFAULT_TABLE\", \"audio_table\")\nTOP_K = int(os.getenv(\"TOP_K\", \"10\"))\n\n############### MySQL Configuration ###############\nMYSQL_HOST = os.getenv(\"MYSQL_HOST\", \"127.0.0.1\")\nMYSQL_PORT = int(os.getenv(\"MYSQL_PORT\", \"3306\"))\nMYSQL_USER = os.getenv(\"MYSQL_USER\", \"root\")\nMYSQL_PWD = os.getenv(\"MYSQL_PWD\", \"123456\")\nMYSQL_DB = os.getenv(\"MYSQL_DB\", \"mysql\")\n\n############### Data Path ###############\nUPLOAD_PATH = os.getenv(\"UPLOAD_PATH\", \"tmp/audio-data\")\n\n############### Number of Log Files ###############\nLOGS_NUM = int(os.getenv(\"logs_num\", \"0\"))\n", "path": "demos/audio_searching/src/config.py"}, {"content": "# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\n\nimport librosa\nimport numpy as np\nfrom logs import LOGGER\n\n\ndef get_audio_embedding(path):\n \"\"\"\n Use vpr_inference to generate embedding of audio\n \"\"\"\n try:\n RESAMPLE_RATE = 16000\n audio, _ = librosa.load(path, sr=RESAMPLE_RATE, mono=True)\n\n # TODO add infer/python interface to get embedding, now fake it by rand\n # vpr = ECAPATDNN(checkpoint_path=None, device='cuda')\n # embedding = vpr.inference(audio)\n np.random.seed(hash(os.path.basename(path)) % 1000000)\n embedding = np.random.rand(1, 2048)\n embedding = embedding / np.linalg.norm(embedding)\n embedding = embedding.tolist()[0]\n return embedding\n except Exception as e:\n LOGGER.error(f\"Error with embedding:{e}\")\n return None\n", "path": "demos/audio_searching/src/encode.py"}]} | 1,459 | 511 |
gh_patches_debug_14 | rasdani/github-patches | git_diff | OCHA-DAP__hdx-ckan-2135 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Browse Page Map: opening a country link has different behaviors
From the map: open in new tab
From the list: open in same tab
We should make it the same: open in the same tab (unless there was some specification that it should be a new tab that I'm not remembering).
Graphic in the Colombia page: instead of a line (time-series) graph, make it a bar graph.
CJ added current action for this issue:
- Change "Number of IDPs" graph **from** bar graph **to** line graph.
-----------------Original issue text follows---------------------
I think the graph **Number of people with access constraints** would look better if it were a bar graph instead of a line (time-series) graph:

The reason I think that is that the lines give the impression the indicator changes significantly every month, but in a continuum of time. Bar graphs will help the user compare months as nearly independent measurements, which influences better consumption of the data, in my opinion.
I chatted with the Data Team about this (including @JavierTeran) and they've approved this suggestion.
</issue>
<code>
[start of ckanext-hdx_theme/ckanext/hdx_theme/version.py]
1 hdx_version = 'v0.6.1'
2
[end of ckanext-hdx_theme/ckanext/hdx_theme/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/version.py b/ckanext-hdx_theme/ckanext/hdx_theme/version.py
--- a/ckanext-hdx_theme/ckanext/hdx_theme/version.py
+++ b/ckanext-hdx_theme/ckanext/hdx_theme/version.py
@@ -1 +1 @@
-hdx_version = 'v0.6.1'
+hdx_version = 'v0.6.2'
| {"golden_diff": "diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/version.py b/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n--- a/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n+++ b/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n@@ -1 +1 @@\n-hdx_version = 'v0.6.1'\n+hdx_version = 'v0.6.2'\n", "issue": "Browse Page Map: opening a country link has different behaviors\nFrom the map: open in new tab\nFrom the list: open in same tab\n\nWe should make it the same: open in same tab (unless there was some specification that it should be a new tab that I'm not remembering. \n\nGraphic in Colombia page: instead of line (time-series) make it a bar graph.\nCJ added current action for this issue:\n- Change \"Number of IDPs\" graph **from** bar graph **to** line graph. \n\n-----------------Original issue text follows---------------------\nI think the graph **Number of people with access constrains** would look better if it was a bar graph instead of a line, time-series: \n\n\n\nThe reason I think that is that the lines give the impression the indicator changes significantly every month, but in a continuum of time. Bar graphs will help the user compare months as nearly independent measurements, which is influences better consumption of the data in my opinion. \n\nI chatted with the Data Team about this (including @JavierTeran) and they've approved this suggestion.\n\n", "before_files": [{"content": "hdx_version = 'v0.6.1'\n", "path": "ckanext-hdx_theme/ckanext/hdx_theme/version.py"}]} | 877 | 106 |
gh_patches_debug_2059 | rasdani/github-patches | git_diff | comic__grand-challenge.org-1062 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The schema is empty for unauthorised users.
Another problem with this - the schema is empty for unauthorised users. You need to add `public=True` to `get_schema_view`.
_Originally posted by @jmsmkn in https://github.com/comic/grand-challenge.org/issues/1017#issuecomment-567254400_
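For illustration, a minimal sketch of the suggested change, assuming the drf-yasg `get_schema_view` helper; the real call in `app/grandchallenge/api/urls.py` (listed below) passes additional arguments (description, license, terms of service, URL patterns) that are omitted here, and the title string is a placeholder:

```python
from drf_yasg import openapi
from drf_yasg.views import get_schema_view
from rest_framework import permissions

schema_view = get_schema_view(
    openapi.Info(title="grand-challenge API", default_version="v1"),
    public=True,  # serve the generated schema to unauthorised users as well
    permission_classes=(permissions.AllowAny,),
)
```

Without `public=True`, drf-yasg only includes the endpoints the requesting user may access, which is why anonymous users currently see an empty schema.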
</issue>
<code>
[start of app/grandchallenge/api/urls.py]
1 from django.conf import settings
2 from django.conf.urls import include, url
3 from django.urls import path
4 from drf_yasg import openapi
5 from drf_yasg.views import get_schema_view
6 from rest_framework import permissions, routers
7
8 from grandchallenge.algorithms.views import (
9 AlgorithmImageViewSet,
10 AlgorithmViewSet,
11 JobViewSet,
12 ResultViewSet,
13 )
14 from grandchallenge.cases.views import (
15 ImageViewSet,
16 RawImageUploadSessionViewSet,
17 )
18 from grandchallenge.jqfileupload.views import StagedFileViewSet
19 from grandchallenge.reader_studies.views import (
20 AnswerViewSet,
21 QuestionViewSet,
22 ReaderStudyViewSet,
23 )
24 from grandchallenge.retina_api.views import LandmarkAnnotationSetViewSet
25 from grandchallenge.subdomains.utils import reverse_lazy
26 from grandchallenge.workstation_configs.views import WorkstationConfigViewSet
27 from grandchallenge.workstations.views import SessionViewSet
28
29 app_name = "api"
30
31 router = routers.DefaultRouter()
32 router.register(
33 r"cases/upload-sessions",
34 RawImageUploadSessionViewSet,
35 basename="upload-session",
36 )
37 router.register(r"cases/images", ImageViewSet, basename="image")
38 router.register(r"workstations/sessions", SessionViewSet)
39 router.register(
40 r"workstations/configs",
41 WorkstationConfigViewSet,
42 basename="workstations-config",
43 )
44 router.register(r"algorithms/jobs", JobViewSet, basename="algorithms-job")
45 router.register(
46 r"algorithms/results", ResultViewSet, basename="algorithms-result"
47 )
48 router.register(
49 r"algorithms/images", AlgorithmImageViewSet, basename="algorithms-image"
50 )
51 router.register(r"algorithms", AlgorithmViewSet, basename="algorithm")
52
53 router.register(
54 r"reader-studies/answers", AnswerViewSet, basename="reader-studies-answer"
55 )
56 router.register(
57 r"reader-studies/questions",
58 QuestionViewSet,
59 basename="reader-studies-question",
60 )
61 router.register(r"reader-studies", ReaderStudyViewSet, basename="reader-study")
62 router.register(r"chunked-uploads", StagedFileViewSet, basename="staged-file")
63
64 router.register(
65 r"retina/landmark-annotation",
66 LandmarkAnnotationSetViewSet,
67 basename="landmark-annotation",
68 )
69
70 # TODO: add terms_of_service and contact
71 schema_view = get_schema_view(
72 openapi.Info(
73 title=f"{settings.SESSION_COOKIE_DOMAIN.lstrip('.')} API",
74 default_version="v1",
75 description=f"The API for {settings.SESSION_COOKIE_DOMAIN.lstrip('.')}.",
76 license=openapi.License(name="Apache License 2.0"),
77 terms_of_service=reverse_lazy(
78 "policies:detail", kwargs={"slug": "terms-of-service"}
79 ),
80 ),
81 permission_classes=(permissions.AllowAny,),
82 patterns=[path("api/v1/", include(router.urls))],
83 )
84
85 urlpatterns = [
86 url(
87 r"^swagger(?P<format>\.json|\.yaml)$",
88 schema_view.without_ui(),
89 name="schema-json",
90 ),
91 # Do not namespace the router.urls without updating the view names in
92 # the serializers
93 path("v1/", include(router.urls)),
94 path("auth/", include("rest_framework.urls", namespace="rest_framework")),
95 path("", schema_view.with_ui("swagger"), name="schema-docs"),
96 ]
97
[end of app/grandchallenge/api/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/grandchallenge/api/urls.py b/app/grandchallenge/api/urls.py
--- a/app/grandchallenge/api/urls.py
+++ b/app/grandchallenge/api/urls.py
@@ -78,6 +78,7 @@
"policies:detail", kwargs={"slug": "terms-of-service"}
),
),
+ public=True,
permission_classes=(permissions.AllowAny,),
patterns=[path("api/v1/", include(router.urls))],
)
| {"golden_diff": "diff --git a/app/grandchallenge/api/urls.py b/app/grandchallenge/api/urls.py\n--- a/app/grandchallenge/api/urls.py\n+++ b/app/grandchallenge/api/urls.py\n@@ -78,6 +78,7 @@\n \"policies:detail\", kwargs={\"slug\": \"terms-of-service\"}\n ),\n ),\n+ public=True,\n permission_classes=(permissions.AllowAny,),\n patterns=[path(\"api/v1/\", include(router.urls))],\n )\n", "issue": "The schema is empty for unauthorised users.\nAnother problem with this - the schema is empty for unauthorised users. You need to add `public=True` to `get_schema_view`.\r\n\r\n_Originally posted by @jmsmkn in https://github.com/comic/grand-challenge.org/issues/1017#issuecomment-567254400_\n", "before_files": [{"content": "from django.conf import settings\nfrom django.conf.urls import include, url\nfrom django.urls import path\nfrom drf_yasg import openapi\nfrom drf_yasg.views import get_schema_view\nfrom rest_framework import permissions, routers\n\nfrom grandchallenge.algorithms.views import (\n AlgorithmImageViewSet,\n AlgorithmViewSet,\n JobViewSet,\n ResultViewSet,\n)\nfrom grandchallenge.cases.views import (\n ImageViewSet,\n RawImageUploadSessionViewSet,\n)\nfrom grandchallenge.jqfileupload.views import StagedFileViewSet\nfrom grandchallenge.reader_studies.views import (\n AnswerViewSet,\n QuestionViewSet,\n ReaderStudyViewSet,\n)\nfrom grandchallenge.retina_api.views import LandmarkAnnotationSetViewSet\nfrom grandchallenge.subdomains.utils import reverse_lazy\nfrom grandchallenge.workstation_configs.views import WorkstationConfigViewSet\nfrom grandchallenge.workstations.views import SessionViewSet\n\napp_name = \"api\"\n\nrouter = routers.DefaultRouter()\nrouter.register(\n r\"cases/upload-sessions\",\n RawImageUploadSessionViewSet,\n basename=\"upload-session\",\n)\nrouter.register(r\"cases/images\", ImageViewSet, basename=\"image\")\nrouter.register(r\"workstations/sessions\", SessionViewSet)\nrouter.register(\n r\"workstations/configs\",\n WorkstationConfigViewSet,\n basename=\"workstations-config\",\n)\nrouter.register(r\"algorithms/jobs\", JobViewSet, basename=\"algorithms-job\")\nrouter.register(\n r\"algorithms/results\", ResultViewSet, basename=\"algorithms-result\"\n)\nrouter.register(\n r\"algorithms/images\", AlgorithmImageViewSet, basename=\"algorithms-image\"\n)\nrouter.register(r\"algorithms\", AlgorithmViewSet, basename=\"algorithm\")\n\nrouter.register(\n r\"reader-studies/answers\", AnswerViewSet, basename=\"reader-studies-answer\"\n)\nrouter.register(\n r\"reader-studies/questions\",\n QuestionViewSet,\n basename=\"reader-studies-question\",\n)\nrouter.register(r\"reader-studies\", ReaderStudyViewSet, basename=\"reader-study\")\nrouter.register(r\"chunked-uploads\", StagedFileViewSet, basename=\"staged-file\")\n\nrouter.register(\n r\"retina/landmark-annotation\",\n LandmarkAnnotationSetViewSet,\n basename=\"landmark-annotation\",\n)\n\n# TODO: add terms_of_service and contact\nschema_view = get_schema_view(\n openapi.Info(\n title=f\"{settings.SESSION_COOKIE_DOMAIN.lstrip('.')} API\",\n default_version=\"v1\",\n description=f\"The API for {settings.SESSION_COOKIE_DOMAIN.lstrip('.')}.\",\n license=openapi.License(name=\"Apache License 2.0\"),\n terms_of_service=reverse_lazy(\n \"policies:detail\", kwargs={\"slug\": \"terms-of-service\"}\n ),\n ),\n permission_classes=(permissions.AllowAny,),\n patterns=[path(\"api/v1/\", include(router.urls))],\n)\n\nurlpatterns = [\n url(\n r\"^swagger(?P<format>\\.json|\\.yaml)$\",\n schema_view.without_ui(),\n name=\"schema-json\",\n ),\n 
# Do not namespace the router.urls without updating the view names in\n # the serializers\n path(\"v1/\", include(router.urls)),\n path(\"auth/\", include(\"rest_framework.urls\", namespace=\"rest_framework\")),\n path(\"\", schema_view.with_ui(\"swagger\"), name=\"schema-docs\"),\n]\n", "path": "app/grandchallenge/api/urls.py"}]} | 1,498 | 104 |
gh_patches_debug_12858 | rasdani/github-patches | git_diff | streamlink__streamlink-5616 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
plugins.hls: recognize URLs with uppercase ".M3U8"
### Checklist
- [X] This is a bug report and not [a different kind of issue](https://github.com/streamlink/streamlink/issues/new/choose)
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed bug reports](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22bug%22)
- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Streamlink version
streamlink 6.2.1+29.gc82a8535
### Description
Currently a URL with an uppercase ".M3U8" extension, e.g. `https://example.com/live.M3U8`, would not be recognized as an HLS URL.
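A minimal sketch of one possible fix, compiling the plugin's low-priority URL matcher case-insensitively so the uppercase suffix is accepted; this is an illustration rather than the exact patch:

```python
import re

# Same pattern as the HLS plugin's low-priority matcher, but compiled
# with re.IGNORECASE so ".M3U8" matches as well as ".m3u8".
pattern = re.compile(
    r"(?P<url>\S+\.m3u8(?:\?\S*)?)(?:\s(?P<params>.+))?$",
    re.IGNORECASE,
)

assert pattern.match("https://example.com/live.M3U8")
```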
### Debug log
```text
>streamlink https://example.com/live.M3U8 best --loglevel=debug
[cli][debug] OS: Windows 10
[cli][debug] Python: 3.11.1
[cli][debug] OpenSSL: OpenSSL 1.1.1q 5 Jul 2022
[cli][debug] Streamlink: 6.2.1+29.gc82a8535
[cli][debug] Dependencies:
[cli][debug] certifi: 2023.5.7
[cli][debug] isodate: 0.6.1
[cli][debug] lxml: 4.9.2
[cli][debug] pycountry: 22.3.5
[cli][debug] pycryptodome: 3.16.0
[cli][debug] PySocks: 1.7.1
[cli][debug] requests: 2.31.0
[cli][debug] trio: 0.22.0
[cli][debug] trio-websocket: 0.9.2
[cli][debug] typing-extensions: 4.4.0
[cli][debug] urllib3: 1.26.15
[cli][debug] websocket-client: 1.5.1
[cli][debug] Arguments:
[cli][debug] url=https://example.com/live.M3U8
[cli][debug] stream=['best']
[cli][debug] --loglevel=debug
[cli][debug] --player=mpv.exe
[cli][debug] --stream-segment-threads=10
[cli][debug] --hls-segment-queue-threshold=0.0
error: No plugin can handle URL: https://example.com/live.M3U8
```
</issue>
<code>
[start of src/streamlink/plugins/dash.py]
1 import logging
2 import re
3
4 from streamlink.plugin import Plugin, pluginmatcher
5 from streamlink.plugin.plugin import LOW_PRIORITY, parse_params, stream_weight
6 from streamlink.stream.dash import DASHStream
7 from streamlink.utils.url import update_scheme
8
9
10 log = logging.getLogger(__name__)
11
12
13 @pluginmatcher(re.compile(
14 r"dash://(?P<url>\S+)(?:\s(?P<params>.+))?$",
15 ))
16 @pluginmatcher(priority=LOW_PRIORITY, pattern=re.compile(
17 r"(?P<url>\S+\.mpd(?:\?\S*)?)(?:\s(?P<params>.+))?$",
18 ))
19 class MPEGDASH(Plugin):
20 @classmethod
21 def stream_weight(cls, stream):
22 match = re.match(r"^(?:(.*)\+)?(?:a(\d+)k)$", stream)
23 if match and match.group(1) and match.group(2):
24 weight, group = stream_weight(match.group(1))
25 weight += int(match.group(2))
26 return weight, group
27 elif match and match.group(2):
28 return stream_weight(f"{match.group(2)}k")
29 else:
30 return stream_weight(stream)
31
32 def _get_streams(self):
33 data = self.match.groupdict()
34 url = update_scheme("https://", data.get("url"), force=False)
35 params = parse_params(data.get("params"))
36 log.debug(f"URL={url}; params={params}")
37
38 return DASHStream.parse_manifest(self.session, url, **params)
39
40
41 __plugin__ = MPEGDASH
42
[end of src/streamlink/plugins/dash.py]
[start of src/streamlink/plugins/hls.py]
1 import logging
2 import re
3
4 from streamlink.plugin import Plugin, pluginmatcher
5 from streamlink.plugin.plugin import LOW_PRIORITY, parse_params
6 from streamlink.stream.hls import HLSStream
7 from streamlink.utils.url import update_scheme
8
9
10 log = logging.getLogger(__name__)
11
12
13 @pluginmatcher(re.compile(
14 r"hls(?:variant)?://(?P<url>\S+)(?:\s(?P<params>.+))?$",
15 ))
16 @pluginmatcher(priority=LOW_PRIORITY, pattern=re.compile(
17 r"(?P<url>\S+\.m3u8(?:\?\S*)?)(?:\s(?P<params>.+))?$",
18 ))
19 class HLSPlugin(Plugin):
20 def _get_streams(self):
21 data = self.match.groupdict()
22 url = update_scheme("https://", data.get("url"), force=False)
23 params = parse_params(data.get("params"))
24 log.debug(f"URL={url}; params={params}")
25
26 streams = HLSStream.parse_variant_playlist(self.session, url, **params)
27
28 return streams or {"live": HLSStream(self.session, url, **params)}
29
30
31 __plugin__ = HLSPlugin
32
[end of src/streamlink/plugins/hls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/streamlink/plugins/dash.py b/src/streamlink/plugins/dash.py
--- a/src/streamlink/plugins/dash.py
+++ b/src/streamlink/plugins/dash.py
@@ -15,6 +15,7 @@
))
@pluginmatcher(priority=LOW_PRIORITY, pattern=re.compile(
r"(?P<url>\S+\.mpd(?:\?\S*)?)(?:\s(?P<params>.+))?$",
+ re.IGNORECASE,
))
class MPEGDASH(Plugin):
@classmethod
diff --git a/src/streamlink/plugins/hls.py b/src/streamlink/plugins/hls.py
--- a/src/streamlink/plugins/hls.py
+++ b/src/streamlink/plugins/hls.py
@@ -15,6 +15,7 @@
))
@pluginmatcher(priority=LOW_PRIORITY, pattern=re.compile(
r"(?P<url>\S+\.m3u8(?:\?\S*)?)(?:\s(?P<params>.+))?$",
+ re.IGNORECASE,
))
class HLSPlugin(Plugin):
def _get_streams(self):
| {"golden_diff": "diff --git a/src/streamlink/plugins/dash.py b/src/streamlink/plugins/dash.py\n--- a/src/streamlink/plugins/dash.py\n+++ b/src/streamlink/plugins/dash.py\n@@ -15,6 +15,7 @@\n ))\n @pluginmatcher(priority=LOW_PRIORITY, pattern=re.compile(\n r\"(?P<url>\\S+\\.mpd(?:\\?\\S*)?)(?:\\s(?P<params>.+))?$\",\n+ re.IGNORECASE,\n ))\n class MPEGDASH(Plugin):\n @classmethod\ndiff --git a/src/streamlink/plugins/hls.py b/src/streamlink/plugins/hls.py\n--- a/src/streamlink/plugins/hls.py\n+++ b/src/streamlink/plugins/hls.py\n@@ -15,6 +15,7 @@\n ))\n @pluginmatcher(priority=LOW_PRIORITY, pattern=re.compile(\n r\"(?P<url>\\S+\\.m3u8(?:\\?\\S*)?)(?:\\s(?P<params>.+))?$\",\n+ re.IGNORECASE,\n ))\n class HLSPlugin(Plugin):\n def _get_streams(self):\n", "issue": "plugins.hls: recognize URLs with uppercase \".M3U8\"\n### Checklist\n\n- [X] This is a bug report and not [a different kind of issue](https://github.com/streamlink/streamlink/issues/new/choose)\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\n- [X] [I have checked the list of open and recently closed bug reports](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22bug%22)\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\n\n### Streamlink version\n\nstreamlink 6.2.1+29.gc82a8535\n\n### Description\n\nCurrently a URL with upper case M3U8, e.g. `https://example.com/live.M3U8` would not be recognized as an HLS URL.\r\n\r\n\n\n### Debug log\n\n```text\n>streamlink https://example.com/live.M3U8 best --loglevel=debug \r\n[cli][debug] OS: Windows 10\r\n[cli][debug] Python: 3.11.1\r\n[cli][debug] OpenSSL: OpenSSL 1.1.1q 5 Jul 2022\r\n[cli][debug] Streamlink: 6.2.1+29.gc82a8535\r\n[cli][debug] Dependencies:\r\n[cli][debug] certifi: 2023.5.7\r\n[cli][debug] isodate: 0.6.1\r\n[cli][debug] lxml: 4.9.2\r\n[cli][debug] pycountry: 22.3.5\r\n[cli][debug] pycryptodome: 3.16.0\r\n[cli][debug] PySocks: 1.7.1\r\n[cli][debug] requests: 2.31.0\r\n[cli][debug] trio: 0.22.0\r\n[cli][debug] trio-websocket: 0.9.2\r\n[cli][debug] typing-extensions: 4.4.0\r\n[cli][debug] urllib3: 1.26.15\r\n[cli][debug] websocket-client: 1.5.1\r\n[cli][debug] Arguments:\r\n[cli][debug] url=https://example.com/live.M3U8\r\n[cli][debug] stream=['best']\r\n[cli][debug] --loglevel=debug\r\n[cli][debug] --player=mpv.exe\r\n[cli][debug] --stream-segment-threads=10\r\n[cli][debug] --hls-segment-queue-threshold=0.0\r\nerror: No plugin can handle URL: https://example.com/live.M3U8\n```\n\n", "before_files": [{"content": "import logging\nimport re\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.plugin import LOW_PRIORITY, parse_params, stream_weight\nfrom streamlink.stream.dash import DASHStream\nfrom streamlink.utils.url import update_scheme\n\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r\"dash://(?P<url>\\S+)(?:\\s(?P<params>.+))?$\",\n))\n@pluginmatcher(priority=LOW_PRIORITY, pattern=re.compile(\n r\"(?P<url>\\S+\\.mpd(?:\\?\\S*)?)(?:\\s(?P<params>.+))?$\",\n))\nclass MPEGDASH(Plugin):\n @classmethod\n def stream_weight(cls, stream):\n match = re.match(r\"^(?:(.*)\\+)?(?:a(\\d+)k)$\", stream)\n if match and match.group(1) and match.group(2):\n weight, group = stream_weight(match.group(1))\n weight += int(match.group(2))\n return weight, group\n elif match and match.group(2):\n return stream_weight(f\"{match.group(2)}k\")\n else:\n return 
stream_weight(stream)\n\n def _get_streams(self):\n data = self.match.groupdict()\n url = update_scheme(\"https://\", data.get(\"url\"), force=False)\n params = parse_params(data.get(\"params\"))\n log.debug(f\"URL={url}; params={params}\")\n\n return DASHStream.parse_manifest(self.session, url, **params)\n\n\n__plugin__ = MPEGDASH\n", "path": "src/streamlink/plugins/dash.py"}, {"content": "import logging\nimport re\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.plugin import LOW_PRIORITY, parse_params\nfrom streamlink.stream.hls import HLSStream\nfrom streamlink.utils.url import update_scheme\n\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r\"hls(?:variant)?://(?P<url>\\S+)(?:\\s(?P<params>.+))?$\",\n))\n@pluginmatcher(priority=LOW_PRIORITY, pattern=re.compile(\n r\"(?P<url>\\S+\\.m3u8(?:\\?\\S*)?)(?:\\s(?P<params>.+))?$\",\n))\nclass HLSPlugin(Plugin):\n def _get_streams(self):\n data = self.match.groupdict()\n url = update_scheme(\"https://\", data.get(\"url\"), force=False)\n params = parse_params(data.get(\"params\"))\n log.debug(f\"URL={url}; params={params}\")\n\n streams = HLSStream.parse_variant_playlist(self.session, url, **params)\n\n return streams or {\"live\": HLSStream(self.session, url, **params)}\n\n\n__plugin__ = HLSPlugin\n", "path": "src/streamlink/plugins/hls.py"}]} | 1,930 | 234 |
gh_patches_debug_11981 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-1861 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Implement _array mathesar type
- [ ] Cell component for array type
- [ ] Type specific handling for items within array
- [ ] Handle filtering, grouping
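As background for the checklist above, a minimal sketch (an assumption about the backend side, not the final implementation) of how an array UI type could be split out of the `OTHER` bucket in `mathesar/database/types.py` below, following the pattern of the existing `UIType` entries:

```python
# Hypothetical new UIType member; PostgresType._ARRAY would then be
# removed from the OTHER entry so arrays get their own UI handling.
ARRAY = (
    'array',
    'Array',
    {
        PostgresType._ARRAY,
    },
)
```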
</issue>
<code>
[start of mathesar/database/types.py]
1 """
2 This file describes UI data types and how they map to DB-layer database types (subclasses
3 of db.types.base.DatabaseType).
4 """
5 from enum import Enum
6 from collections.abc import Collection
7 from db.types.base import (
8 DatabaseType, PostgresType, MathesarCustomType
9 )
10 from db.types.hintsets import db_types_hinted
11
12
13 class UIType(Enum):
14 id: str # noqa: NT001
15 display_name: str # noqa: NT001
16 db_types: Collection[DatabaseType] # noqa: NT001
17
18 BOOLEAN = (
19 'boolean',
20 'Boolean',
21 {
22 PostgresType.BOOLEAN,
23 },
24 )
25 DATE = (
26 'date',
27 'Date',
28 {
29 PostgresType.DATE,
30 },
31 )
32 TIME = (
33 'time',
34 'Time',
35 {
36 PostgresType.TIME_WITH_TIME_ZONE,
37 PostgresType.TIME_WITHOUT_TIME_ZONE,
38 },
39 )
40 DATETIME = (
41 'datetime',
42 'Date & Time',
43 {
44 PostgresType.TIMESTAMP_WITH_TIME_ZONE,
45 PostgresType.TIMESTAMP_WITHOUT_TIME_ZONE,
46 },
47 )
48 DURATION = (
49 'duration',
50 'Duration',
51 {
52 PostgresType.INTERVAL,
53 },
54 )
55 EMAIL = (
56 'email',
57 'Email',
58 {
59 MathesarCustomType.EMAIL,
60 },
61 )
62 MONEY = (
63 'money',
64 'Money',
65 {
66 PostgresType.MONEY,
67 MathesarCustomType.MATHESAR_MONEY,
68 MathesarCustomType.MULTICURRENCY_MONEY,
69 }
70 )
71 NUMBER = (
72 'number',
73 'Number',
74 {
75 PostgresType.BIGINT,
76 PostgresType.DOUBLE_PRECISION,
77 PostgresType.INTEGER,
78 PostgresType.NUMERIC,
79 PostgresType.REAL,
80 PostgresType.SMALLINT,
81 }
82 )
83 TEXT = (
84 'text',
85 'Text',
86 {
87 PostgresType.CHARACTER,
88 PostgresType.CHARACTER_VARYING,
89 PostgresType.TEXT,
90 PostgresType.NAME,
91 PostgresType.CHAR,
92 },
93 )
94 URI = (
95 'uri',
96 'URI',
97 {
98 MathesarCustomType.URI,
99 }
100 )
101 JSON_ARRAY = (
102 'jsonlist',
103 'JSON List',
104 {
105 MathesarCustomType.MATHESAR_JSON_ARRAY,
106 }
107 )
108 JSON_OBJECT = (
109 'map',
110 'Map',
111 {
112 MathesarCustomType.MATHESAR_JSON_OBJECT,
113 }
114 )
115 # These are default Postgres types that we don't have specific behavior for yet in the UI.
116 OTHER = (
117 'other',
118 'Other',
119 {
120 PostgresType._ARRAY,
121 PostgresType.BIT_VARYING,
122 PostgresType.BIT,
123 PostgresType.BYTEA,
124 PostgresType.CIDR,
125 PostgresType.DATERANGE,
126 PostgresType.HSTORE,
127 PostgresType.INET,
128 PostgresType.INT4RANGE,
129 PostgresType.INT8RANGE,
130 PostgresType.JSON,
131 PostgresType.JSONB,
132 PostgresType.MACADDR,
133 PostgresType.NUMRANGE,
134 PostgresType.OID,
135 PostgresType.REGCLASS,
136 PostgresType.TSRANGE,
137 PostgresType.TSTZRANGE,
138 PostgresType.TSVECTOR,
139 PostgresType.UUID,
140 },
141 )
142
143 def __new__(cls, ui_type_id, display_name, db_types):
144 """
145 The Enum is adapted to take three initial properties. Enum's value is set to be the first
146 property -- the id.
147 """
148 obj = object.__new__(cls)
149 obj._value_ = ui_type_id
150 obj.id = ui_type_id
151 obj.display_name = display_name
152 obj.db_types = frozenset(db_types)
153 return obj
154
155 def __str__(self):
156 return self.id
157
158
159 def ui_types_that_satisfy_hintset(ui_types_mapped_to_hintsets, hintset):
160 """
161 Provided a mapping of UI types to their hintsets and a hintset, tries to find UI
162 types whose hintsets satisfy the passed hintset, meaning the UI types whose hintsets are
163 supersets of the passed hintset.
164 """
165 hintset = set(hintset)
166 return frozenset(
167 ui_type
168 for ui_type, ui_type_hintset
169 in ui_types_mapped_to_hintsets.items()
170 if set.issubset(hintset, ui_type_hintset)
171 )
172
173
174 def get_ui_types_mapped_to_hintsets():
175 """
176 Returns a dict where the keys are UI types and the values their hintsets.
177 A UI type's hintset is defined as the intersection of the hintsets of its associated
178 database types.
179 """
180 ui_types_mapped_to_hintsets = {}
181 for ui_type in UIType:
182 associated_db_types = ui_type.db_types
183 associated_db_type_hintsets = tuple(
184 set(db_types_hinted[associated_db_type])
185 for associated_db_type in associated_db_types
186 if associated_db_type in db_types_hinted
187 )
188 hintsets_intersection = _safe_set_intersection(associated_db_type_hintsets)
189 ui_types_mapped_to_hintsets[ui_type] = frozenset(hintsets_intersection)
190 return ui_types_mapped_to_hintsets
191
192
193 def _safe_set_intersection(sets):
194 # set.intersection fails if it is not passed anything.
195 if len(sets) > 0:
196 return set.intersection(*sets)
197 else:
198 return set()
199
200
201 def get_ui_type_from_db_type(db_type_to_find):
202 for ui_type in UIType:
203 associated_db_types = ui_type.db_types
204 if db_type_to_find in associated_db_types:
205 return ui_type
206
207
208 def get_ui_type_from_id(ui_type_id):
209 try:
210 return UIType(ui_type_id)
211 except ValueError:
212 return None
213
[end of mathesar/database/types.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mathesar/database/types.py b/mathesar/database/types.py
--- a/mathesar/database/types.py
+++ b/mathesar/database/types.py
@@ -112,12 +112,18 @@
MathesarCustomType.MATHESAR_JSON_OBJECT,
}
)
+ ARRAY = (
+ 'array',
+ 'Array',
+ {
+ PostgresType._ARRAY,
+ }
+ )
# These are default Postgres types that we don't have specific behavior for yet in the UI.
OTHER = (
'other',
'Other',
{
- PostgresType._ARRAY,
PostgresType.BIT_VARYING,
PostgresType.BIT,
PostgresType.BYTEA,
| {"golden_diff": "diff --git a/mathesar/database/types.py b/mathesar/database/types.py\n--- a/mathesar/database/types.py\n+++ b/mathesar/database/types.py\n@@ -112,12 +112,18 @@\n MathesarCustomType.MATHESAR_JSON_OBJECT,\n }\n )\n+ ARRAY = (\n+ 'array',\n+ 'Array',\n+ {\n+ PostgresType._ARRAY,\n+ }\n+ )\n # These are default Postgres types that we don't have specific behavior for yet in the UI.\n OTHER = (\n 'other',\n 'Other',\n {\n- PostgresType._ARRAY,\n PostgresType.BIT_VARYING,\n PostgresType.BIT,\n PostgresType.BYTEA,\n", "issue": "Implement _array mathesar type\n- [ ] Cell component for array type\r\n- [ ] Type specific handling for items within array\r\n- [ ] Handle filtering, grouping\n", "before_files": [{"content": "\"\"\"\nThis file describes UI data types and how they map to DB-layer database types (subclasses\nof db.types.base.DatabaseType).\n\"\"\"\nfrom enum import Enum\nfrom collections.abc import Collection\nfrom db.types.base import (\n DatabaseType, PostgresType, MathesarCustomType\n)\nfrom db.types.hintsets import db_types_hinted\n\n\nclass UIType(Enum):\n id: str # noqa: NT001\n display_name: str # noqa: NT001\n db_types: Collection[DatabaseType] # noqa: NT001\n\n BOOLEAN = (\n 'boolean',\n 'Boolean',\n {\n PostgresType.BOOLEAN,\n },\n )\n DATE = (\n 'date',\n 'Date',\n {\n PostgresType.DATE,\n },\n )\n TIME = (\n 'time',\n 'Time',\n {\n PostgresType.TIME_WITH_TIME_ZONE,\n PostgresType.TIME_WITHOUT_TIME_ZONE,\n },\n )\n DATETIME = (\n 'datetime',\n 'Date & Time',\n {\n PostgresType.TIMESTAMP_WITH_TIME_ZONE,\n PostgresType.TIMESTAMP_WITHOUT_TIME_ZONE,\n },\n )\n DURATION = (\n 'duration',\n 'Duration',\n {\n PostgresType.INTERVAL,\n },\n )\n EMAIL = (\n 'email',\n 'Email',\n {\n MathesarCustomType.EMAIL,\n },\n )\n MONEY = (\n 'money',\n 'Money',\n {\n PostgresType.MONEY,\n MathesarCustomType.MATHESAR_MONEY,\n MathesarCustomType.MULTICURRENCY_MONEY,\n }\n )\n NUMBER = (\n 'number',\n 'Number',\n {\n PostgresType.BIGINT,\n PostgresType.DOUBLE_PRECISION,\n PostgresType.INTEGER,\n PostgresType.NUMERIC,\n PostgresType.REAL,\n PostgresType.SMALLINT,\n }\n )\n TEXT = (\n 'text',\n 'Text',\n {\n PostgresType.CHARACTER,\n PostgresType.CHARACTER_VARYING,\n PostgresType.TEXT,\n PostgresType.NAME,\n PostgresType.CHAR,\n },\n )\n URI = (\n 'uri',\n 'URI',\n {\n MathesarCustomType.URI,\n }\n )\n JSON_ARRAY = (\n 'jsonlist',\n 'JSON List',\n {\n MathesarCustomType.MATHESAR_JSON_ARRAY,\n }\n )\n JSON_OBJECT = (\n 'map',\n 'Map',\n {\n MathesarCustomType.MATHESAR_JSON_OBJECT,\n }\n )\n # These are default Postgres types that we don't have specific behavior for yet in the UI.\n OTHER = (\n 'other',\n 'Other',\n {\n PostgresType._ARRAY,\n PostgresType.BIT_VARYING,\n PostgresType.BIT,\n PostgresType.BYTEA,\n PostgresType.CIDR,\n PostgresType.DATERANGE,\n PostgresType.HSTORE,\n PostgresType.INET,\n PostgresType.INT4RANGE,\n PostgresType.INT8RANGE,\n PostgresType.JSON,\n PostgresType.JSONB,\n PostgresType.MACADDR,\n PostgresType.NUMRANGE,\n PostgresType.OID,\n PostgresType.REGCLASS,\n PostgresType.TSRANGE,\n PostgresType.TSTZRANGE,\n PostgresType.TSVECTOR,\n PostgresType.UUID,\n },\n )\n\n def __new__(cls, ui_type_id, display_name, db_types):\n \"\"\"\n The Enum is adapted to take three initial properties. 
Enum's value is set to be the first\n property -- the id.\n \"\"\"\n obj = object.__new__(cls)\n obj._value_ = ui_type_id\n obj.id = ui_type_id\n obj.display_name = display_name\n obj.db_types = frozenset(db_types)\n return obj\n\n def __str__(self):\n return self.id\n\n\ndef ui_types_that_satisfy_hintset(ui_types_mapped_to_hintsets, hintset):\n \"\"\"\n Provided a mapping of UI types to their hintsets and a hintset, tries to find UI\n types whose hintsets satisfy the passed hintset, meaning the UI types whose hintsets are\n supersets of the passed hintset.\n \"\"\"\n hintset = set(hintset)\n return frozenset(\n ui_type\n for ui_type, ui_type_hintset\n in ui_types_mapped_to_hintsets.items()\n if set.issubset(hintset, ui_type_hintset)\n )\n\n\ndef get_ui_types_mapped_to_hintsets():\n \"\"\"\n Returns a dict where the keys are UI types and the values their hintsets.\n A UI type's hintset is defined as the intersection of the hintsets of its associated\n database types.\n \"\"\"\n ui_types_mapped_to_hintsets = {}\n for ui_type in UIType:\n associated_db_types = ui_type.db_types\n associated_db_type_hintsets = tuple(\n set(db_types_hinted[associated_db_type])\n for associated_db_type in associated_db_types\n if associated_db_type in db_types_hinted\n )\n hintsets_intersection = _safe_set_intersection(associated_db_type_hintsets)\n ui_types_mapped_to_hintsets[ui_type] = frozenset(hintsets_intersection)\n return ui_types_mapped_to_hintsets\n\n\ndef _safe_set_intersection(sets):\n # set.intersection fails if it is not passed anything.\n if len(sets) > 0:\n return set.intersection(*sets)\n else:\n return set()\n\n\ndef get_ui_type_from_db_type(db_type_to_find):\n for ui_type in UIType:\n associated_db_types = ui_type.db_types\n if db_type_to_find in associated_db_types:\n return ui_type\n\n\ndef get_ui_type_from_id(ui_type_id):\n try:\n return UIType(ui_type_id)\n except ValueError:\n return None\n", "path": "mathesar/database/types.py"}]} | 2,394 | 164 |
gh_patches_debug_42229 | rasdani/github-patches | git_diff | oppia__oppia-9750 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Contributor Dashboard] Ensure try/catch blocks in suggestion.py print useful stack traces.
Specifically we want to ensure the try/catch blocks in `ReviewableSuggestionsHandler` and `UserSubmittedSuggestionsHandler` print useful stack traces for debugging. Because Python omits any part of the stack trace "below" the top-level exception in a try/catch block, it is difficult to trace the origin of an exception during debugging. This cost a lot of time when we were debugging the contributor dashboard on a test server.
Some options include removing the try/catch block or using something similar to [`traceback.format_exc()`](https://stackoverflow.com/a/3702847) to print useful stack traces.
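As an illustration of the `traceback.format_exc()` option, a minimal sketch with a hypothetical helper name (not the handlers' actual code):

```python
import logging
import traceback


def fetch_suggestions():  # hypothetical stand-in for the handler's work
    raise ValueError('deep failure')


try:
    fetch_suggestions()
except Exception:
    # format_exc() renders the full traceback of the exception being
    # handled, so the original raise site stays visible in the logs
    # even if a different exception is raised afterwards.
    logging.error(traceback.format_exc())
    raise
```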
</issue>
<code>
[start of core/controllers/suggestion.py]
1 # coding: utf-8
2 #
3 # Copyright 2018 The Oppia Authors. All Rights Reserved.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS-IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17 """Controllers for suggestions."""
18
19 from __future__ import absolute_import # pylint: disable=import-only-modules
20 from __future__ import unicode_literals # pylint: disable=import-only-modules
21
22 from core.controllers import acl_decorators
23 from core.controllers import base
24 from core.domain import opportunity_services
25 from core.domain import skill_fetchers
26 from core.domain import suggestion_services
27 from core.platform import models
28 import feconf
29 import utils
30
31 (suggestion_models,) = models.Registry.import_models([models.NAMES.suggestion])
32
33
34 def _require_valid_suggestion_and_target_types(target_type, suggestion_type):
35 """Checks whether the given target_type and suggestion_type are valid.
36
37 Args:
38 target_type: str. The type of the suggestion target.
39 suggestion_type: str. The type of the suggestion.
40
41 Raises:
42 InvalidInputException: If the given target_type of suggestion_type are
43 invalid.
44 """
45 if target_type not in suggestion_models.TARGET_TYPE_CHOICES:
46 raise utils.InvalidInputException(
47 'Invalid target_type: %s' % target_type)
48
49 if suggestion_type not in suggestion_models.SUGGESTION_TYPE_CHOICES:
50 raise utils.InvalidInputException(
51 'Invalid suggestion_type: %s' % suggestion_type)
52
53
54 def _get_target_id_to_exploration_opportunity_dict(suggestions):
55 """Returns a dict of target_id to exploration opportunity summary dict.
56
57 Args:
58 suggestions: list(BaseSuggestion). A list of suggestions to retrieve
59 opportunity dicts.
60
61 Returns:
62 dict. Dict mapping target_id to corresponding exploration opportunity
63 summary dict.
64 """
65 target_ids = set([s.target_id for s in suggestions])
66 opportunities = (
67 opportunity_services.get_exploration_opportunity_summaries_by_ids(
68 list(target_ids)))
69 return {opp.id: opp.to_dict() for opp in opportunities}
70
71
72 def _get_target_id_to_skill_opportunity_dict(suggestions):
73 """Returns a dict of target_id to skill opportunity summary dict.
74
75 Args:
76 suggestions: list(BaseSuggestion). A list of suggestions to retrieve
77 opportunity dicts.
78
79 Returns:
80 dict. Dict mapping target_id to corresponding skill opportunity dict.
81 """
82 target_ids = set([s.target_id for s in suggestions])
83 opportunities = (
84 opportunity_services.get_skill_opportunities_by_ids(list(target_ids)))
85 opportunity_skill_ids = [opp.id for opp in opportunities]
86 opportunity_id_to_skill = {
87 skill.id: skill
88 for skill in skill_fetchers.get_multi_skills(opportunity_skill_ids)
89 }
90 opportunity_id_to_opportunity = {}
91 for opp in opportunities:
92 opp_dict = opp.to_dict()
93 skill = opportunity_id_to_skill.get(opp.id)
94 if skill is not None:
95 opp_dict['skill_rubrics'] = [
96 rubric.to_dict() for rubric in skill.rubrics]
97 opportunity_id_to_opportunity[opp.id] = opp_dict
98 return opportunity_id_to_opportunity
99
100
101 class SuggestionHandler(base.BaseHandler):
102 """"Handles operations relating to suggestions."""
103
104 @acl_decorators.can_suggest_changes
105 def post(self):
106 try:
107 suggestion_services.create_suggestion(
108 self.payload.get('suggestion_type'),
109 self.payload.get('target_type'), self.payload.get('target_id'),
110 self.payload.get('target_version_at_submission'),
111 self.user_id, self.payload.get('change'),
112 self.payload.get('description'))
113 except utils.ValidationError as e:
114 raise self.InvalidInputException(e)
115 self.render_json(self.values)
116
117
118 class SuggestionToExplorationActionHandler(base.BaseHandler):
119 """Handles actions performed on suggestions to explorations."""
120
121 @acl_decorators.get_decorator_for_accepting_suggestion(
122 acl_decorators.can_edit_exploration)
123 def put(self, target_id, suggestion_id):
124 if (
125 suggestion_id.split('.')[0] !=
126 suggestion_models.TARGET_TYPE_EXPLORATION):
127 raise self.InvalidInputException('This handler allows actions only'
128 ' on suggestions to explorations.')
129
130 if suggestion_id.split('.')[1] != target_id:
131 raise self.InvalidInputException('The exploration id provided does '
132 'not match the exploration id '
133 'present as part of the '
134 'suggestion_id')
135
136 action = self.payload.get('action')
137 suggestion = suggestion_services.get_suggestion_by_id(suggestion_id)
138
139 if suggestion.author_id == self.user_id:
140 raise self.UnauthorizedUserException('You cannot accept/reject your'
141 ' own suggestion.')
142
143 if action == suggestion_models.ACTION_TYPE_ACCEPT:
144 suggestion_services.accept_suggestion(
145 suggestion, self.user_id, self.payload.get('commit_message'),
146 self.payload.get('review_message'))
147 elif action == suggestion_models.ACTION_TYPE_REJECT:
148 suggestion_services.reject_suggestion(
149 suggestion, self.user_id, self.payload.get('review_message'))
150 else:
151 raise self.InvalidInputException('Invalid action.')
152
153 self.render_json(self.values)
154
155
156 class ResubmitSuggestionHandler(base.BaseHandler):
157 """Handler to reopen a rejected suggestion."""
158
159 @acl_decorators.can_resubmit_suggestion
160 def put(self, suggestion_id):
161 suggestion = suggestion_services.get_suggestion_by_id(suggestion_id)
162 new_change = self.payload.get('change')
163 change_cls = type(suggestion.change)
164 change_object = change_cls(new_change)
165 suggestion.pre_update_validate(change_object)
166 suggestion.change = change_object
167 summary_message = self.payload.get('summary_message')
168 suggestion_services.resubmit_rejected_suggestion(
169 suggestion, summary_message, self.user_id)
170 self.render_json(self.values)
171
172
173 class SuggestionToSkillActionHandler(base.BaseHandler):
174 """Handles actions performed on suggestions to skills."""
175
176 @acl_decorators.get_decorator_for_accepting_suggestion(
177 acl_decorators.can_edit_skill)
178 def put(self, target_id, suggestion_id):
179 if suggestion_id.split('.')[0] != suggestion_models.TARGET_TYPE_SKILL:
180 raise self.InvalidInputException(
181 'This handler allows actions only on suggestions to skills.')
182
183 if suggestion_id.split('.')[1] != target_id:
184 raise self.InvalidInputException(
185 'The skill id provided does not match the skill id present as '
186 'part of the suggestion_id')
187
188 action = self.payload.get('action')
189 suggestion = suggestion_services.get_suggestion_by_id(suggestion_id)
190
191 if action == suggestion_models.ACTION_TYPE_ACCEPT:
192 # Question suggestions do not use commit messages.
193 suggestion_services.accept_suggestion(
194 suggestion, self.user_id, 'UNUSED_COMMIT_MESSAGE',
195 self.payload.get('review_message'))
196 elif action == suggestion_models.ACTION_TYPE_REJECT:
197 suggestion_services.reject_suggestion(
198 suggestion, self.user_id, self.payload.get('review_message'))
199 else:
200 raise self.InvalidInputException('Invalid action.')
201
202 self.render_json(self.values)
203
204
205 class SuggestionsProviderHandler(base.BaseHandler):
206 """Provides suggestions for a user and given suggestion type."""
207
208 GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
209
210 def _render_suggestions(self, target_type, suggestions):
211 """Renders retrieved suggestions.
212
213 Args:
214 target_type: str. The suggestion type.
215 suggestions: list(BaseSuggestion). A list of suggestions to render.
216 """
217 if target_type == suggestion_models.TARGET_TYPE_EXPLORATION:
218 target_id_to_opportunity_dict = (
219 _get_target_id_to_exploration_opportunity_dict(suggestions))
220 self.render_json({
221 'suggestions': [s.to_dict() for s in suggestions],
222 'target_id_to_opportunity_dict':
223 target_id_to_opportunity_dict
224 })
225 elif target_type == suggestion_models.TARGET_TYPE_SKILL:
226 target_id_to_opportunity_dict = (
227 _get_target_id_to_skill_opportunity_dict(suggestions))
228 self.render_json({
229 'suggestions': [s.to_dict() for s in suggestions],
230 'target_id_to_opportunity_dict':
231 target_id_to_opportunity_dict
232 })
233 else:
234 self.render_json({})
235
236
237 class ReviewableSuggestionsHandler(SuggestionsProviderHandler):
238 """Provides all suggestions which can be reviewed by the user for a given
239 suggestion type.
240 """
241
242 @acl_decorators.can_view_reviewable_suggestions
243 def get(self, target_type, suggestion_type):
244 """Handles GET requests."""
245 try:
246 _require_valid_suggestion_and_target_types(
247 target_type, suggestion_type)
248 suggestions = suggestion_services.get_reviewable_suggestions(
249 self.user_id, suggestion_type)
250 self._render_suggestions(target_type, suggestions)
251 except Exception as e:
252 raise self.InvalidInputException(e)
253
254
255 class UserSubmittedSuggestionsHandler(SuggestionsProviderHandler):
256 """Provides all suggestions which are submitted by the user for a given
257 suggestion type.
258 """
259
260 @acl_decorators.can_suggest_changes
261 def get(self, target_type, suggestion_type):
262 """Handles GET requests."""
263 try:
264 _require_valid_suggestion_and_target_types(
265 target_type, suggestion_type)
266 suggestions = suggestion_services.get_submitted_suggestions(
267 self.user_id, suggestion_type)
268 self._render_suggestions(target_type, suggestions)
269 except Exception as e:
270 raise self.InvalidInputException(e)
271
272
273 class SuggestionListHandler(base.BaseHandler):
274 """Handles list operations on suggestions."""
275
276 GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
277
278 @acl_decorators.open_access
279 def get(self):
280 # The query_fields_and_values variable is a list of tuples. The first
281 # element in each tuple is the field being queried and the second
282 # element is the value of the field being queried.
283 # request.GET.items() parses the params from the url into the above
284 # format. So in the url, the query should be passed as:
285 # ?field1=value1&field2=value2...fieldN=valueN.
286 query_fields_and_values = list(self.request.GET.items())
287
288 for query in query_fields_and_values:
289 if query[0] not in suggestion_models.ALLOWED_QUERY_FIELDS:
290 raise self.InvalidInputException(
291 'Not allowed to query on field %s' % query[0])
292
293 suggestions = suggestion_services.query_suggestions(
294 query_fields_and_values)
295
296 self.values.update({'suggestions': [s.to_dict() for s in suggestions]})
297 self.render_json(self.values)
298
[end of core/controllers/suggestion.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/core/controllers/suggestion.py b/core/controllers/suggestion.py
--- a/core/controllers/suggestion.py
+++ b/core/controllers/suggestion.py
@@ -31,26 +31,6 @@
(suggestion_models,) = models.Registry.import_models([models.NAMES.suggestion])
-def _require_valid_suggestion_and_target_types(target_type, suggestion_type):
- """Checks whether the given target_type and suggestion_type are valid.
-
- Args:
- target_type: str. The type of the suggestion target.
- suggestion_type: str. The type of the suggestion.
-
- Raises:
- InvalidInputException: If the given target_type of suggestion_type are
- invalid.
- """
- if target_type not in suggestion_models.TARGET_TYPE_CHOICES:
- raise utils.InvalidInputException(
- 'Invalid target_type: %s' % target_type)
-
- if suggestion_type not in suggestion_models.SUGGESTION_TYPE_CHOICES:
- raise utils.InvalidInputException(
- 'Invalid suggestion_type: %s' % suggestion_type)
-
-
def _get_target_id_to_exploration_opportunity_dict(suggestions):
"""Returns a dict of target_id to exploration opportunity summary dict.
@@ -207,6 +187,26 @@
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
+ def _require_valid_suggestion_and_target_types(
+ self, target_type, suggestion_type):
+ """Checks whether the given target_type and suggestion_type are valid.
+
+ Args:
+ target_type: str. The type of the suggestion target.
+ suggestion_type: str. The type of the suggestion.
+
+ Raises:
+ InvalidInputException: If the given target_type of suggestion_type
+ are invalid.
+ """
+ if target_type not in suggestion_models.TARGET_TYPE_CHOICES:
+ raise self.InvalidInputException(
+ 'Invalid target_type: %s' % target_type)
+
+ if suggestion_type not in suggestion_models.SUGGESTION_TYPE_CHOICES:
+ raise self.InvalidInputException(
+ 'Invalid suggestion_type: %s' % suggestion_type)
+
def _render_suggestions(self, target_type, suggestions):
"""Renders retrieved suggestions.
@@ -242,14 +242,11 @@
@acl_decorators.can_view_reviewable_suggestions
def get(self, target_type, suggestion_type):
"""Handles GET requests."""
- try:
- _require_valid_suggestion_and_target_types(
- target_type, suggestion_type)
- suggestions = suggestion_services.get_reviewable_suggestions(
- self.user_id, suggestion_type)
- self._render_suggestions(target_type, suggestions)
- except Exception as e:
- raise self.InvalidInputException(e)
+ self._require_valid_suggestion_and_target_types(
+ target_type, suggestion_type)
+ suggestions = suggestion_services.get_reviewable_suggestions(
+ self.user_id, suggestion_type)
+ self._render_suggestions(target_type, suggestions)
class UserSubmittedSuggestionsHandler(SuggestionsProviderHandler):
@@ -260,14 +257,11 @@
@acl_decorators.can_suggest_changes
def get(self, target_type, suggestion_type):
"""Handles GET requests."""
- try:
- _require_valid_suggestion_and_target_types(
- target_type, suggestion_type)
- suggestions = suggestion_services.get_submitted_suggestions(
- self.user_id, suggestion_type)
- self._render_suggestions(target_type, suggestions)
- except Exception as e:
- raise self.InvalidInputException(e)
+ self._require_valid_suggestion_and_target_types(
+ target_type, suggestion_type)
+ suggestions = suggestion_services.get_submitted_suggestions(
+ self.user_id, suggestion_type)
+ self._render_suggestions(target_type, suggestions)
class SuggestionListHandler(base.BaseHandler):
| {"golden_diff": "diff --git a/core/controllers/suggestion.py b/core/controllers/suggestion.py\n--- a/core/controllers/suggestion.py\n+++ b/core/controllers/suggestion.py\n@@ -31,26 +31,6 @@\n (suggestion_models,) = models.Registry.import_models([models.NAMES.suggestion])\n \n \n-def _require_valid_suggestion_and_target_types(target_type, suggestion_type):\n- \"\"\"Checks whether the given target_type and suggestion_type are valid.\n-\n- Args:\n- target_type: str. The type of the suggestion target.\n- suggestion_type: str. The type of the suggestion.\n-\n- Raises:\n- InvalidInputException: If the given target_type of suggestion_type are\n- invalid.\n- \"\"\"\n- if target_type not in suggestion_models.TARGET_TYPE_CHOICES:\n- raise utils.InvalidInputException(\n- 'Invalid target_type: %s' % target_type)\n-\n- if suggestion_type not in suggestion_models.SUGGESTION_TYPE_CHOICES:\n- raise utils.InvalidInputException(\n- 'Invalid suggestion_type: %s' % suggestion_type)\n-\n-\n def _get_target_id_to_exploration_opportunity_dict(suggestions):\n \"\"\"Returns a dict of target_id to exploration opportunity summary dict.\n \n@@ -207,6 +187,26 @@\n \n GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON\n \n+ def _require_valid_suggestion_and_target_types(\n+ self, target_type, suggestion_type):\n+ \"\"\"Checks whether the given target_type and suggestion_type are valid.\n+\n+ Args:\n+ target_type: str. The type of the suggestion target.\n+ suggestion_type: str. The type of the suggestion.\n+\n+ Raises:\n+ InvalidInputException: If the given target_type of suggestion_type\n+ are invalid.\n+ \"\"\"\n+ if target_type not in suggestion_models.TARGET_TYPE_CHOICES:\n+ raise self.InvalidInputException(\n+ 'Invalid target_type: %s' % target_type)\n+\n+ if suggestion_type not in suggestion_models.SUGGESTION_TYPE_CHOICES:\n+ raise self.InvalidInputException(\n+ 'Invalid suggestion_type: %s' % suggestion_type)\n+\n def _render_suggestions(self, target_type, suggestions):\n \"\"\"Renders retrieved suggestions.\n \n@@ -242,14 +242,11 @@\n @acl_decorators.can_view_reviewable_suggestions\n def get(self, target_type, suggestion_type):\n \"\"\"Handles GET requests.\"\"\"\n- try:\n- _require_valid_suggestion_and_target_types(\n- target_type, suggestion_type)\n- suggestions = suggestion_services.get_reviewable_suggestions(\n- self.user_id, suggestion_type)\n- self._render_suggestions(target_type, suggestions)\n- except Exception as e:\n- raise self.InvalidInputException(e)\n+ self._require_valid_suggestion_and_target_types(\n+ target_type, suggestion_type)\n+ suggestions = suggestion_services.get_reviewable_suggestions(\n+ self.user_id, suggestion_type)\n+ self._render_suggestions(target_type, suggestions)\n \n \n class UserSubmittedSuggestionsHandler(SuggestionsProviderHandler):\n@@ -260,14 +257,11 @@\n @acl_decorators.can_suggest_changes\n def get(self, target_type, suggestion_type):\n \"\"\"Handles GET requests.\"\"\"\n- try:\n- _require_valid_suggestion_and_target_types(\n- target_type, suggestion_type)\n- suggestions = suggestion_services.get_submitted_suggestions(\n- self.user_id, suggestion_type)\n- self._render_suggestions(target_type, suggestions)\n- except Exception as e:\n- raise self.InvalidInputException(e)\n+ self._require_valid_suggestion_and_target_types(\n+ target_type, suggestion_type)\n+ suggestions = suggestion_services.get_submitted_suggestions(\n+ self.user_id, suggestion_type)\n+ self._render_suggestions(target_type, suggestions)\n \n \n class SuggestionListHandler(base.BaseHandler):\n", 
"issue": "[Contributor Dashboard] Ensure try/catch blocks in suggestion.py print useful stack traces.\nSpecifically we want to ensure the try/catch blocks `ReviewableSuggestionsHandler` and `UserSubmittedSuggestionsHandler` print useful stack traces for debugging. Because Python omits any part of the stack trace \"below\" the top-level exception in a try/catch block, it is difficult to trace the origin of an exception during debugging. This cost a lot of time when we were debugging the contributor dashboard on a test server.\r\n\r\nSome options include removing the try/catch block or using something similar to [`traceback.format_exc()`](https://stackoverflow.com/a/3702847) to print useful stack traces.\n[Contributor Dashboard] Ensure try/catch blocks in suggestion.py print useful stack traces.\nSpecifically we want to ensure the try/catch blocks `ReviewableSuggestionsHandler` and `UserSubmittedSuggestionsHandler` print useful stack traces for debugging. Because Python omits any part of the stack trace \"below\" the top-level exception in a try/catch block, it is difficult to trace the origin of an exception during debugging. This cost a lot of time when we were debugging the contributor dashboard on a test server.\r\n\r\nSome options include removing the try/catch block or using something similar to [`traceback.format_exc()`](https://stackoverflow.com/a/3702847) to print useful stack traces.\n", "before_files": [{"content": "# coding: utf-8\n#\n# Copyright 2018 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Controllers for suggestions.\"\"\"\n\nfrom __future__ import absolute_import # pylint: disable=import-only-modules\nfrom __future__ import unicode_literals # pylint: disable=import-only-modules\n\nfrom core.controllers import acl_decorators\nfrom core.controllers import base\nfrom core.domain import opportunity_services\nfrom core.domain import skill_fetchers\nfrom core.domain import suggestion_services\nfrom core.platform import models\nimport feconf\nimport utils\n\n(suggestion_models,) = models.Registry.import_models([models.NAMES.suggestion])\n\n\ndef _require_valid_suggestion_and_target_types(target_type, suggestion_type):\n \"\"\"Checks whether the given target_type and suggestion_type are valid.\n\n Args:\n target_type: str. The type of the suggestion target.\n suggestion_type: str. The type of the suggestion.\n\n Raises:\n InvalidInputException: If the given target_type of suggestion_type are\n invalid.\n \"\"\"\n if target_type not in suggestion_models.TARGET_TYPE_CHOICES:\n raise utils.InvalidInputException(\n 'Invalid target_type: %s' % target_type)\n\n if suggestion_type not in suggestion_models.SUGGESTION_TYPE_CHOICES:\n raise utils.InvalidInputException(\n 'Invalid suggestion_type: %s' % suggestion_type)\n\n\ndef _get_target_id_to_exploration_opportunity_dict(suggestions):\n \"\"\"Returns a dict of target_id to exploration opportunity summary dict.\n\n Args:\n suggestions: list(BaseSuggestion). 
A list of suggestions to retrieve\n opportunity dicts.\n\n Returns:\n dict. Dict mapping target_id to corresponding exploration opportunity\n summary dict.\n \"\"\"\n target_ids = set([s.target_id for s in suggestions])\n opportunities = (\n opportunity_services.get_exploration_opportunity_summaries_by_ids(\n list(target_ids)))\n return {opp.id: opp.to_dict() for opp in opportunities}\n\n\ndef _get_target_id_to_skill_opportunity_dict(suggestions):\n \"\"\"Returns a dict of target_id to skill opportunity summary dict.\n\n Args:\n suggestions: list(BaseSuggestion). A list of suggestions to retrieve\n opportunity dicts.\n\n Returns:\n dict. Dict mapping target_id to corresponding skill opportunity dict.\n \"\"\"\n target_ids = set([s.target_id for s in suggestions])\n opportunities = (\n opportunity_services.get_skill_opportunities_by_ids(list(target_ids)))\n opportunity_skill_ids = [opp.id for opp in opportunities]\n opportunity_id_to_skill = {\n skill.id: skill\n for skill in skill_fetchers.get_multi_skills(opportunity_skill_ids)\n }\n opportunity_id_to_opportunity = {}\n for opp in opportunities:\n opp_dict = opp.to_dict()\n skill = opportunity_id_to_skill.get(opp.id)\n if skill is not None:\n opp_dict['skill_rubrics'] = [\n rubric.to_dict() for rubric in skill.rubrics]\n opportunity_id_to_opportunity[opp.id] = opp_dict\n return opportunity_id_to_opportunity\n\n\nclass SuggestionHandler(base.BaseHandler):\n \"\"\"\"Handles operations relating to suggestions.\"\"\"\n\n @acl_decorators.can_suggest_changes\n def post(self):\n try:\n suggestion_services.create_suggestion(\n self.payload.get('suggestion_type'),\n self.payload.get('target_type'), self.payload.get('target_id'),\n self.payload.get('target_version_at_submission'),\n self.user_id, self.payload.get('change'),\n self.payload.get('description'))\n except utils.ValidationError as e:\n raise self.InvalidInputException(e)\n self.render_json(self.values)\n\n\nclass SuggestionToExplorationActionHandler(base.BaseHandler):\n \"\"\"Handles actions performed on suggestions to explorations.\"\"\"\n\n @acl_decorators.get_decorator_for_accepting_suggestion(\n acl_decorators.can_edit_exploration)\n def put(self, target_id, suggestion_id):\n if (\n suggestion_id.split('.')[0] !=\n suggestion_models.TARGET_TYPE_EXPLORATION):\n raise self.InvalidInputException('This handler allows actions only'\n ' on suggestions to explorations.')\n\n if suggestion_id.split('.')[1] != target_id:\n raise self.InvalidInputException('The exploration id provided does '\n 'not match the exploration id '\n 'present as part of the '\n 'suggestion_id')\n\n action = self.payload.get('action')\n suggestion = suggestion_services.get_suggestion_by_id(suggestion_id)\n\n if suggestion.author_id == self.user_id:\n raise self.UnauthorizedUserException('You cannot accept/reject your'\n ' own suggestion.')\n\n if action == suggestion_models.ACTION_TYPE_ACCEPT:\n suggestion_services.accept_suggestion(\n suggestion, self.user_id, self.payload.get('commit_message'),\n self.payload.get('review_message'))\n elif action == suggestion_models.ACTION_TYPE_REJECT:\n suggestion_services.reject_suggestion(\n suggestion, self.user_id, self.payload.get('review_message'))\n else:\n raise self.InvalidInputException('Invalid action.')\n\n self.render_json(self.values)\n\n\nclass ResubmitSuggestionHandler(base.BaseHandler):\n \"\"\"Handler to reopen a rejected suggestion.\"\"\"\n\n @acl_decorators.can_resubmit_suggestion\n def put(self, suggestion_id):\n suggestion = 
suggestion_services.get_suggestion_by_id(suggestion_id)\n new_change = self.payload.get('change')\n change_cls = type(suggestion.change)\n change_object = change_cls(new_change)\n suggestion.pre_update_validate(change_object)\n suggestion.change = change_object\n summary_message = self.payload.get('summary_message')\n suggestion_services.resubmit_rejected_suggestion(\n suggestion, summary_message, self.user_id)\n self.render_json(self.values)\n\n\nclass SuggestionToSkillActionHandler(base.BaseHandler):\n \"\"\"Handles actions performed on suggestions to skills.\"\"\"\n\n @acl_decorators.get_decorator_for_accepting_suggestion(\n acl_decorators.can_edit_skill)\n def put(self, target_id, suggestion_id):\n if suggestion_id.split('.')[0] != suggestion_models.TARGET_TYPE_SKILL:\n raise self.InvalidInputException(\n 'This handler allows actions only on suggestions to skills.')\n\n if suggestion_id.split('.')[1] != target_id:\n raise self.InvalidInputException(\n 'The skill id provided does not match the skill id present as '\n 'part of the suggestion_id')\n\n action = self.payload.get('action')\n suggestion = suggestion_services.get_suggestion_by_id(suggestion_id)\n\n if action == suggestion_models.ACTION_TYPE_ACCEPT:\n # Question suggestions do not use commit messages.\n suggestion_services.accept_suggestion(\n suggestion, self.user_id, 'UNUSED_COMMIT_MESSAGE',\n self.payload.get('review_message'))\n elif action == suggestion_models.ACTION_TYPE_REJECT:\n suggestion_services.reject_suggestion(\n suggestion, self.user_id, self.payload.get('review_message'))\n else:\n raise self.InvalidInputException('Invalid action.')\n\n self.render_json(self.values)\n\n\nclass SuggestionsProviderHandler(base.BaseHandler):\n \"\"\"Provides suggestions for a user and given suggestion type.\"\"\"\n\n GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON\n\n def _render_suggestions(self, target_type, suggestions):\n \"\"\"Renders retrieved suggestions.\n\n Args:\n target_type: str. The suggestion type.\n suggestions: list(BaseSuggestion). 
A list of suggestions to render.\n \"\"\"\n if target_type == suggestion_models.TARGET_TYPE_EXPLORATION:\n target_id_to_opportunity_dict = (\n _get_target_id_to_exploration_opportunity_dict(suggestions))\n self.render_json({\n 'suggestions': [s.to_dict() for s in suggestions],\n 'target_id_to_opportunity_dict':\n target_id_to_opportunity_dict\n })\n elif target_type == suggestion_models.TARGET_TYPE_SKILL:\n target_id_to_opportunity_dict = (\n _get_target_id_to_skill_opportunity_dict(suggestions))\n self.render_json({\n 'suggestions': [s.to_dict() for s in suggestions],\n 'target_id_to_opportunity_dict':\n target_id_to_opportunity_dict\n })\n else:\n self.render_json({})\n\n\nclass ReviewableSuggestionsHandler(SuggestionsProviderHandler):\n \"\"\"Provides all suggestions which can be reviewed by the user for a given\n suggestion type.\n \"\"\"\n\n @acl_decorators.can_view_reviewable_suggestions\n def get(self, target_type, suggestion_type):\n \"\"\"Handles GET requests.\"\"\"\n try:\n _require_valid_suggestion_and_target_types(\n target_type, suggestion_type)\n suggestions = suggestion_services.get_reviewable_suggestions(\n self.user_id, suggestion_type)\n self._render_suggestions(target_type, suggestions)\n except Exception as e:\n raise self.InvalidInputException(e)\n\n\nclass UserSubmittedSuggestionsHandler(SuggestionsProviderHandler):\n \"\"\"Provides all suggestions which are submitted by the user for a given\n suggestion type.\n \"\"\"\n\n @acl_decorators.can_suggest_changes\n def get(self, target_type, suggestion_type):\n \"\"\"Handles GET requests.\"\"\"\n try:\n _require_valid_suggestion_and_target_types(\n target_type, suggestion_type)\n suggestions = suggestion_services.get_submitted_suggestions(\n self.user_id, suggestion_type)\n self._render_suggestions(target_type, suggestions)\n except Exception as e:\n raise self.InvalidInputException(e)\n\n\nclass SuggestionListHandler(base.BaseHandler):\n \"\"\"Handles list operations on suggestions.\"\"\"\n\n GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON\n\n @acl_decorators.open_access\n def get(self):\n # The query_fields_and_values variable is a list of tuples. The first\n # element in each tuple is the field being queried and the second\n # element is the value of the field being queried.\n # request.GET.items() parses the params from the url into the above\n # format. So in the url, the query should be passed as:\n # ?field1=value1&field2=value2...fieldN=valueN.\n query_fields_and_values = list(self.request.GET.items())\n\n for query in query_fields_and_values:\n if query[0] not in suggestion_models.ALLOWED_QUERY_FIELDS:\n raise self.InvalidInputException(\n 'Not allowed to query on field %s' % query[0])\n\n suggestions = suggestion_services.query_suggestions(\n query_fields_and_values)\n\n self.values.update({'suggestions': [s.to_dict() for s in suggestions]})\n self.render_json(self.values)\n", "path": "core/controllers/suggestion.py"}]} | 3,946 | 844 |
gh_patches_debug_4119 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-2424 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
First time password reset should redirect to login page instead of confirmation page
## Description
* After the user resets their password at the page: `/auth/password_reset_confirm`, they are redirected to `/auth/reset/done/`.
* They should be redirected to `/auth/login` instead.
</issue>
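`PasswordResetConfirmView` chooses its post-reset redirect via the `success_url` attribute, which by default resolves to the `password_reset_complete` view (served at `/auth/reset/done/` here). Overriding that attribute on the subclass is enough to land the user on the login page instead; a sketch of the override, which is also what the patch at the end of this record does:

```python
from django.contrib.auth.views import PasswordResetConfirmView


class MathesarPasswordResetConfirmView(PasswordResetConfirmView):
    # Redirect to the login page instead of the default
    # "password reset complete" page once the new password is saved.
    success_url = "/auth/login"
```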
<code>
[start of mathesar/users/password_reset.py]
1 from django.contrib.auth.forms import SetPasswordForm
2 from django.contrib.auth.views import PasswordResetConfirmView
3 from django.utils.decorators import method_decorator
4 from django.views.decorators.cache import never_cache
5 from django.views.decorators.debug import sensitive_post_parameters
6 from django.utils.translation import gettext_lazy as _
7
8
9 class MathesarSetPasswordForm(SetPasswordForm):
10 def save(self, commit=True):
11 password = self.cleaned_data["new_password1"]
12 self.user.set_password(password)
13 # Default password is replaced with a password is set by the user, so change the status
14 self.user.password_change_needed = False
15 if commit:
16 self.user.save()
17 return self.user
18
19
20 class MathesarPasswordResetConfirmView(PasswordResetConfirmView):
21 # Override default form as we need custom save behaviour
22 form_class = MathesarSetPasswordForm
23 template_name = 'users/password_reset_confirmation.html'
24 title = _('Change Default Password')
25
26 @method_decorator(sensitive_post_parameters())
27 @method_decorator(never_cache)
28 def dispatch(self, *args, **kwargs):
29 self.user = self.request.user
30 self.validlink = True
31 # Avoid calling the PasswordResetConfirmView `dispatch` method
32 # as it contains behaviours not suited for our user flow
33 return super(PasswordResetConfirmView, self).dispatch(*args, **kwargs)
34
35 def form_valid(self, form):
36 form.save()
37 return super(PasswordResetConfirmView, self).form_valid(form)
38
[end of mathesar/users/password_reset.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mathesar/users/password_reset.py b/mathesar/users/password_reset.py
--- a/mathesar/users/password_reset.py
+++ b/mathesar/users/password_reset.py
@@ -22,6 +22,7 @@
form_class = MathesarSetPasswordForm
template_name = 'users/password_reset_confirmation.html'
title = _('Change Default Password')
+ success_url = "/auth/login"
@method_decorator(sensitive_post_parameters())
@method_decorator(never_cache)
| {"golden_diff": "diff --git a/mathesar/users/password_reset.py b/mathesar/users/password_reset.py\n--- a/mathesar/users/password_reset.py\n+++ b/mathesar/users/password_reset.py\n@@ -22,6 +22,7 @@\n form_class = MathesarSetPasswordForm\n template_name = 'users/password_reset_confirmation.html'\n title = _('Change Default Password')\n+ success_url = \"/auth/login\"\n \n @method_decorator(sensitive_post_parameters())\n @method_decorator(never_cache)\n", "issue": "First time password reset should redirect to login page instead of confirmation page\n## Description\r\n* After the user resets their password at the page: `/auth/password_reset_confirm`, they are redirected to `/auth/reset/done/`.\r\n* They should be redirected to `/auth/login` instead.\n", "before_files": [{"content": "from django.contrib.auth.forms import SetPasswordForm\nfrom django.contrib.auth.views import PasswordResetConfirmView\nfrom django.utils.decorators import method_decorator\nfrom django.views.decorators.cache import never_cache\nfrom django.views.decorators.debug import sensitive_post_parameters\nfrom django.utils.translation import gettext_lazy as _\n\n\nclass MathesarSetPasswordForm(SetPasswordForm):\n def save(self, commit=True):\n password = self.cleaned_data[\"new_password1\"]\n self.user.set_password(password)\n # Default password is replaced with a password is set by the user, so change the status\n self.user.password_change_needed = False\n if commit:\n self.user.save()\n return self.user\n\n\nclass MathesarPasswordResetConfirmView(PasswordResetConfirmView):\n # Override default form as we need custom save behaviour\n form_class = MathesarSetPasswordForm\n template_name = 'users/password_reset_confirmation.html'\n title = _('Change Default Password')\n\n @method_decorator(sensitive_post_parameters())\n @method_decorator(never_cache)\n def dispatch(self, *args, **kwargs):\n self.user = self.request.user\n self.validlink = True\n # Avoid calling the PasswordResetConfirmView `dispatch` method\n # as it contains behaviours not suited for our user flow\n return super(PasswordResetConfirmView, self).dispatch(*args, **kwargs)\n\n def form_valid(self, form):\n form.save()\n return super(PasswordResetConfirmView, self).form_valid(form)\n", "path": "mathesar/users/password_reset.py"}]} | 973 | 104 |
gh_patches_debug_9831 | rasdani/github-patches | git_diff | wemake-services__wemake-python-styleguide-1226 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Annotation complexity should not fail on expressions
# Bug report
This code:
```python
def some() -> 'test expression':
...
```
Makes `flake8-annotation-complexity` fail. We need to ignore this case silently.
Related: #1170
Demo: https://asciinema.org/a/IIjIfkVKytmZ1F5c2YufMORdi
Annotation complexity should not fail on expressions
# Bug report
This code:
```python
def some() -> 'test expression':
...
```
Makes `flake8-annotation-complexity` fail. We need to ignore this case silently.
Related: #1170
Demo: https://asciinema.org/a/IIjIfkVKytmZ1F5c2YufMORdi
</issue>
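The crash happens because `get_annotation_compexity()` feeds string annotations straight into `ast.parse()`, and a string such as `'test expression'` is not valid Python, so `ast.parse()` raises `SyntaxError`. A small demonstration of the failure mode and the fallback behaviour (treating an unparseable string as trivially simple, which is the approach the patch below takes):

```python
import ast


def parse_string_annotation(source):
    """Return the parsed annotation node, or None if it is not valid Python."""
    try:
        return ast.parse(source).body[0].value
    except SyntaxError:
        return None


parse_string_annotation("List[int]")        # an ast.Subscript node
parse_string_annotation("test expression")  # None instead of a crash
```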
<code>
[start of wemake_python_styleguide/logic/complexity/annotations.py]
1 """
2 Counts annotation complexity by getting the nesting level of nodes.
3
4 So ``List[int]`` complexity is 2
5 and ``Tuple[List[Optional[str]], int]`` is 4.
6
7 Adapted from: https://github.com/best-doctor/flake8-annotations-complexity
8 """
9
10 import ast
11 from typing import Union
12
13 _Annotation = Union[
14 ast.expr,
15 ast.Str,
16 ]
17
18
19 def get_annotation_compexity(annotation_node: _Annotation) -> int:
20 """
21 Recursevly counts complexity of annotation nodes.
22
23 When annotations are written as strings,
24 we additionally parse them to ``ast`` nodes.
25 """
26 if isinstance(annotation_node, ast.Str):
27 annotation_node = ast.parse( # type: ignore
28 annotation_node.s,
29 ).body[0].value
30
31 if isinstance(annotation_node, ast.Subscript):
32 return 1 + get_annotation_compexity(
33 annotation_node.slice.value, # type: ignore
34 )
35 elif isinstance(annotation_node, (ast.Tuple, ast.List)):
36 return max(
37 (get_annotation_compexity(node) for node in annotation_node.elts),
38 default=1,
39 )
40 return 1
41
[end of wemake_python_styleguide/logic/complexity/annotations.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wemake_python_styleguide/logic/complexity/annotations.py b/wemake_python_styleguide/logic/complexity/annotations.py
--- a/wemake_python_styleguide/logic/complexity/annotations.py
+++ b/wemake_python_styleguide/logic/complexity/annotations.py
@@ -24,9 +24,12 @@
we additionally parse them to ``ast`` nodes.
"""
if isinstance(annotation_node, ast.Str):
- annotation_node = ast.parse( # type: ignore
- annotation_node.s,
- ).body[0].value
+ try:
+ annotation_node = ast.parse( # type: ignore
+ annotation_node.s,
+ ).body[0].value
+ except SyntaxError:
+ return 1
if isinstance(annotation_node, ast.Subscript):
return 1 + get_annotation_compexity(
| {"golden_diff": "diff --git a/wemake_python_styleguide/logic/complexity/annotations.py b/wemake_python_styleguide/logic/complexity/annotations.py\n--- a/wemake_python_styleguide/logic/complexity/annotations.py\n+++ b/wemake_python_styleguide/logic/complexity/annotations.py\n@@ -24,9 +24,12 @@\n we additionally parse them to ``ast`` nodes.\n \"\"\"\n if isinstance(annotation_node, ast.Str):\n- annotation_node = ast.parse( # type: ignore\n- annotation_node.s,\n- ).body[0].value\n+ try:\n+ annotation_node = ast.parse( # type: ignore\n+ annotation_node.s,\n+ ).body[0].value\n+ except SyntaxError:\n+ return 1\n \n if isinstance(annotation_node, ast.Subscript):\n return 1 + get_annotation_compexity(\n", "issue": "Annotation complexity should not fail on expressions\n# Bug report\r\n\r\nThis code:\r\n\r\n```python\r\ndef some() -> 'test expression':\r\n ...\r\n```\r\n\r\nMakes `flake8-annotation-complexity` to fail. We need to ignore this case silently.\r\n\r\nRelated: #1170 \r\n\r\nDemo: https://asciinema.org/a/IIjIfkVKytmZ1F5c2YufMORdi\nAnnotation complexity should not fail on expressions\n# Bug report\r\n\r\nThis code:\r\n\r\n```python\r\ndef some() -> 'test expression':\r\n ...\r\n```\r\n\r\nMakes `flake8-annotation-complexity` to fail. We need to ignore this case silently.\r\n\r\nRelated: #1170 \r\n\r\nDemo: https://asciinema.org/a/IIjIfkVKytmZ1F5c2YufMORdi\n", "before_files": [{"content": "\"\"\"\nCounts annotation complexity by getting the nesting level of nodes.\n\nSo ``List[int]`` complexity is 2\nand ``Tuple[List[Optional[str]], int]`` is 4.\n\nAdapted from: https://github.com/best-doctor/flake8-annotations-complexity\n\"\"\"\n\nimport ast\nfrom typing import Union\n\n_Annotation = Union[\n ast.expr,\n ast.Str,\n]\n\n\ndef get_annotation_compexity(annotation_node: _Annotation) -> int:\n \"\"\"\n Recursevly counts complexity of annotation nodes.\n\n When annotations are written as strings,\n we additionally parse them to ``ast`` nodes.\n \"\"\"\n if isinstance(annotation_node, ast.Str):\n annotation_node = ast.parse( # type: ignore\n annotation_node.s,\n ).body[0].value\n\n if isinstance(annotation_node, ast.Subscript):\n return 1 + get_annotation_compexity(\n annotation_node.slice.value, # type: ignore\n )\n elif isinstance(annotation_node, (ast.Tuple, ast.List)):\n return max(\n (get_annotation_compexity(node) for node in annotation_node.elts),\n default=1,\n )\n return 1\n", "path": "wemake_python_styleguide/logic/complexity/annotations.py"}]} | 1,058 | 198 |
gh_patches_debug_10410 | rasdani/github-patches | git_diff | pypa__pip-6731 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add docs for new pip debug command
This is a follow-up issue to PR #6638 to add docs for the new `pip debug` command. As @xavfernandez said in [this comment](https://github.com/pypa/pip/pull/6638#pullrequestreview-256090004):
> It would also need basic documentation (at least a `docs/html/reference/pip_debug.rst`) and most importantly (IMHO), strongly emphasize that the output and the options of this command are provisional and might change without notice.
</issue>
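Beyond a `docs/html/reference/pip_debug.rst` page, the provisional status can also be surfaced at runtime by logging a warning whenever the command executes. A sketch of what that looks like inside `DebugCommand.run()` (it assumes the surrounding class from the listing below; the patch at the end of this record adds a warning along these lines):

```python
    def run(self, options, args):
        # type: (Values, List[Any]) -> int
        logger.warning(
            "This command is only meant for debugging. "
            "Do not rely on its output or options in automation; "
            "they may change without notice."
        )
        show_value('pip version', get_pip_version())
        show_value('sys.version', sys.version)
        show_value('sys.executable', sys.executable)
        show_value('sys.platform', sys.platform)
        show_sys_implementation()
        show_tags(options)
        return SUCCESS
```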
<code>
[start of src/pip/_internal/commands/debug.py]
1 from __future__ import absolute_import
2
3 import logging
4 import sys
5
6 from pip._internal.cli import cmdoptions
7 from pip._internal.cli.base_command import Command
8 from pip._internal.cli.cmdoptions import make_target_python
9 from pip._internal.cli.status_codes import SUCCESS
10 from pip._internal.utils.logging import indent_log
11 from pip._internal.utils.misc import get_pip_version
12 from pip._internal.utils.typing import MYPY_CHECK_RUNNING
13 from pip._internal.wheel import format_tag
14
15 if MYPY_CHECK_RUNNING:
16 from typing import Any, List
17 from optparse import Values
18
19 logger = logging.getLogger(__name__)
20
21
22 def show_value(name, value):
23 # type: (str, str) -> None
24 logger.info('{}: {}'.format(name, value))
25
26
27 def show_sys_implementation():
28 # type: () -> None
29 logger.info('sys.implementation:')
30 if hasattr(sys, 'implementation'):
31 implementation = sys.implementation # type: ignore
32 implementation_name = implementation.name
33 else:
34 implementation_name = ''
35
36 with indent_log():
37 show_value('name', implementation_name)
38
39
40 def show_tags(options):
41 # type: (Values) -> None
42 tag_limit = 10
43
44 target_python = make_target_python(options)
45 tags = target_python.get_tags()
46
47 # Display the target options that were explicitly provided.
48 formatted_target = target_python.format_given()
49 suffix = ''
50 if formatted_target:
51 suffix = ' (target: {})'.format(formatted_target)
52
53 msg = 'Compatible tags: {}{}'.format(len(tags), suffix)
54 logger.info(msg)
55
56 if options.verbose < 1 and len(tags) > tag_limit:
57 tags_limited = True
58 tags = tags[:tag_limit]
59 else:
60 tags_limited = False
61
62 with indent_log():
63 for tag in tags:
64 logger.info(format_tag(tag))
65
66 if tags_limited:
67 msg = (
68 '...\n'
69 '[First {tag_limit} tags shown. Pass --verbose to show all.]'
70 ).format(tag_limit=tag_limit)
71 logger.info(msg)
72
73
74 class DebugCommand(Command):
75 """
76 Display debug information.
77 """
78
79 name = 'debug'
80 usage = """
81 %prog <options>"""
82 summary = 'Show information useful for debugging.'
83 ignore_require_venv = True
84
85 def __init__(self, *args, **kw):
86 super(DebugCommand, self).__init__(*args, **kw)
87
88 cmd_opts = self.cmd_opts
89 cmdoptions.add_target_python_options(cmd_opts)
90 self.parser.insert_option_group(0, cmd_opts)
91
92 def run(self, options, args):
93 # type: (Values, List[Any]) -> int
94 show_value('pip version', get_pip_version())
95 show_value('sys.version', sys.version)
96 show_value('sys.executable', sys.executable)
97 show_value('sys.platform', sys.platform)
98 show_sys_implementation()
99
100 show_tags(options)
101
102 return SUCCESS
103
[end of src/pip/_internal/commands/debug.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/pip/_internal/commands/debug.py b/src/pip/_internal/commands/debug.py
--- a/src/pip/_internal/commands/debug.py
+++ b/src/pip/_internal/commands/debug.py
@@ -91,6 +91,12 @@
def run(self, options, args):
# type: (Values, List[Any]) -> int
+ logger.warning(
+ "This command is only meant for debugging. "
+ "Do not use this with automation for parsing and getting these "
+ "details, since the output and options of this command may "
+ "change without notice."
+ )
show_value('pip version', get_pip_version())
show_value('sys.version', sys.version)
show_value('sys.executable', sys.executable)
| {"golden_diff": "diff --git a/src/pip/_internal/commands/debug.py b/src/pip/_internal/commands/debug.py\n--- a/src/pip/_internal/commands/debug.py\n+++ b/src/pip/_internal/commands/debug.py\n@@ -91,6 +91,12 @@\n \n def run(self, options, args):\n # type: (Values, List[Any]) -> int\n+ logger.warning(\n+ \"This command is only meant for debugging. \"\n+ \"Do not use this with automation for parsing and getting these \"\n+ \"details, since the output and options of this command may \"\n+ \"change without notice.\"\n+ )\n show_value('pip version', get_pip_version())\n show_value('sys.version', sys.version)\n show_value('sys.executable', sys.executable)\n", "issue": "Add docs for new pip debug command\nThis is a follow-up issue to PR #6638 to add docs for the new `pip debug` command. As @xavfernandez said in [this comment](https://github.com/pypa/pip/pull/6638#pullrequestreview-256090004):\r\n\r\n> It would also need basic documentation (at least a `docs/html/reference/pip_debug.rst`) and most importantly (IMHO), strongly emphasize that the output and the options of this command are provisional and might change without notice.\r\n\r\n\r\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport logging\nimport sys\n\nfrom pip._internal.cli import cmdoptions\nfrom pip._internal.cli.base_command import Command\nfrom pip._internal.cli.cmdoptions import make_target_python\nfrom pip._internal.cli.status_codes import SUCCESS\nfrom pip._internal.utils.logging import indent_log\nfrom pip._internal.utils.misc import get_pip_version\nfrom pip._internal.utils.typing import MYPY_CHECK_RUNNING\nfrom pip._internal.wheel import format_tag\n\nif MYPY_CHECK_RUNNING:\n from typing import Any, List\n from optparse import Values\n\nlogger = logging.getLogger(__name__)\n\n\ndef show_value(name, value):\n # type: (str, str) -> None\n logger.info('{}: {}'.format(name, value))\n\n\ndef show_sys_implementation():\n # type: () -> None\n logger.info('sys.implementation:')\n if hasattr(sys, 'implementation'):\n implementation = sys.implementation # type: ignore\n implementation_name = implementation.name\n else:\n implementation_name = ''\n\n with indent_log():\n show_value('name', implementation_name)\n\n\ndef show_tags(options):\n # type: (Values) -> None\n tag_limit = 10\n\n target_python = make_target_python(options)\n tags = target_python.get_tags()\n\n # Display the target options that were explicitly provided.\n formatted_target = target_python.format_given()\n suffix = ''\n if formatted_target:\n suffix = ' (target: {})'.format(formatted_target)\n\n msg = 'Compatible tags: {}{}'.format(len(tags), suffix)\n logger.info(msg)\n\n if options.verbose < 1 and len(tags) > tag_limit:\n tags_limited = True\n tags = tags[:tag_limit]\n else:\n tags_limited = False\n\n with indent_log():\n for tag in tags:\n logger.info(format_tag(tag))\n\n if tags_limited:\n msg = (\n '...\\n'\n '[First {tag_limit} tags shown. 
Pass --verbose to show all.]'\n ).format(tag_limit=tag_limit)\n logger.info(msg)\n\n\nclass DebugCommand(Command):\n \"\"\"\n Display debug information.\n \"\"\"\n\n name = 'debug'\n usage = \"\"\"\n %prog <options>\"\"\"\n summary = 'Show information useful for debugging.'\n ignore_require_venv = True\n\n def __init__(self, *args, **kw):\n super(DebugCommand, self).__init__(*args, **kw)\n\n cmd_opts = self.cmd_opts\n cmdoptions.add_target_python_options(cmd_opts)\n self.parser.insert_option_group(0, cmd_opts)\n\n def run(self, options, args):\n # type: (Values, List[Any]) -> int\n show_value('pip version', get_pip_version())\n show_value('sys.version', sys.version)\n show_value('sys.executable', sys.executable)\n show_value('sys.platform', sys.platform)\n show_sys_implementation()\n\n show_tags(options)\n\n return SUCCESS\n", "path": "src/pip/_internal/commands/debug.py"}]} | 1,521 | 175 |
gh_patches_debug_3521 | rasdani/github-patches | git_diff | wagtail__wagtail-2465 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove redundant template debug lines from project template
Ref: https://github.com/torchbox/wagtail/blob/9ff7961a3c8f508ad17735cd815335bad12fd67f/wagtail/project_template/project_name/settings/dev.py#L7-L8
#1688
According to https://docs.djangoproject.com/en/1.9/topics/templates/#django.template.backends.django.DjangoTemplates, the 'debug' option on the DjangoTemplates engine defaults to the global DEBUG setting, so setting this here is apparently redundant. (Also, there's no corresponding option for the Jinja2 backend, so setting this for all engines is not strictly correct.)
So, we just need someone to double-check that with these lines removed, template debug info still displays in development mode but not in production.
</issue>
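Since the DjangoTemplates backend falls back to the global `DEBUG` setting when no `'debug'` option is given, the loop over `TEMPLATES` adds nothing in development, and applying it to non-Django engines such as Jinja2 is not strictly correct. With the redundant loop dropped, the relevant portion of `dev.py` reduces to the following (in line with the patch at the end of this record):

```python
from __future__ import absolute_import, unicode_literals

from .base import *

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Template debug information follows DEBUG automatically; no per-engine
# 'debug' option is needed here.

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '{{ secret_key }}'
```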
<code>
[start of wagtail/project_template/project_name/settings/dev.py]
1 from __future__ import absolute_import, unicode_literals
2
3 from .base import *
4
5 # SECURITY WARNING: don't run with debug turned on in production!
6 DEBUG = True
7
8 for template_engine in TEMPLATES:
9 template_engine['OPTIONS']['debug'] = True
10
11 # SECURITY WARNING: keep the secret key used in production secret!
12 SECRET_KEY = '{{ secret_key }}'
13
14
15 EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
16
17
18 try:
19 from .local import *
20 except ImportError:
21 pass
22
[end of wagtail/project_template/project_name/settings/dev.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wagtail/project_template/project_name/settings/dev.py b/wagtail/project_template/project_name/settings/dev.py
--- a/wagtail/project_template/project_name/settings/dev.py
+++ b/wagtail/project_template/project_name/settings/dev.py
@@ -5,9 +5,6 @@
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
-for template_engine in TEMPLATES:
- template_engine['OPTIONS']['debug'] = True
-
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '{{ secret_key }}'
| {"golden_diff": "diff --git a/wagtail/project_template/project_name/settings/dev.py b/wagtail/project_template/project_name/settings/dev.py\n--- a/wagtail/project_template/project_name/settings/dev.py\n+++ b/wagtail/project_template/project_name/settings/dev.py\n@@ -5,9 +5,6 @@\n # SECURITY WARNING: don't run with debug turned on in production!\n DEBUG = True\n \n-for template_engine in TEMPLATES:\n- template_engine['OPTIONS']['debug'] = True\n-\n # SECURITY WARNING: keep the secret key used in production secret!\n SECRET_KEY = '{{ secret_key }}'\n", "issue": "Remove redundant template debug lines from project template\nRef: https://github.com/torchbox/wagtail/blob/9ff7961a3c8f508ad17735cd815335bad12fd67f/wagtail/project_template/project_name/settings/dev.py#L7-L8\n#1688\n\nAccording to https://docs.djangoproject.com/en/1.9/topics/templates/#django.template.backends.django.DjangoTemplates, the 'debug' option on the DjangoTemplates engine defaults to the global DEBUG setting, so setting this here is apparently redundant. (Also, there's no corresponding option for the Jinja2 backend, so setting this for all engines is not strictly correct.)\n\nSo, we just need someone to double-check that with these lines removed, template debug info still displays in development mode but not in production.\n\n", "before_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nfrom .base import *\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nfor template_engine in TEMPLATES:\n template_engine['OPTIONS']['debug'] = True\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = '{{ secret_key }}'\n\n\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\n\ntry:\n from .local import *\nexcept ImportError:\n pass\n", "path": "wagtail/project_template/project_name/settings/dev.py"}]} | 868 | 122 |
gh_patches_debug_3489 | rasdani/github-patches | git_diff | cookiecutter__cookiecutter-1526 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Enforce the minimal coverage to 100%
</issue>
<code>
[start of cookiecutter/utils.py]
1 """Helper functions used throughout Cookiecutter."""
2 import contextlib
3 import errno
4 import logging
5 import os
6 import shutil
7 import stat
8 import sys
9
10 from cookiecutter.prompt import read_user_yes_no
11
12 logger = logging.getLogger(__name__)
13
14
15 def force_delete(func, path, exc_info):
16 """Error handler for `shutil.rmtree()` equivalent to `rm -rf`.
17
18 Usage: `shutil.rmtree(path, onerror=force_delete)`
19 From stackoverflow.com/questions/1889597
20 """
21 os.chmod(path, stat.S_IWRITE)
22 func(path)
23
24
25 def rmtree(path):
26 """Remove a directory and all its contents. Like rm -rf on Unix.
27
28 :param path: A directory path.
29 """
30 shutil.rmtree(path, onerror=force_delete)
31
32
33 def make_sure_path_exists(path):
34 """Ensure that a directory exists.
35
36 :param path: A directory path.
37 """
38 logger.debug('Making sure path exists: %s', path)
39 try:
40 os.makedirs(path)
41 logger.debug('Created directory at: %s', path)
42 except OSError as exception:
43 if exception.errno != errno.EEXIST:
44 return False
45 return True
46
47
48 @contextlib.contextmanager
49 def work_in(dirname=None):
50 """Context manager version of os.chdir.
51
52 When exited, returns to the working directory prior to entering.
53 """
54 curdir = os.getcwd()
55 try:
56 if dirname is not None:
57 os.chdir(dirname)
58 yield
59 finally:
60 os.chdir(curdir)
61
62
63 def make_executable(script_path):
64 """Make `script_path` executable.
65
66 :param script_path: The file to change
67 """
68 status = os.stat(script_path)
69 os.chmod(script_path, status.st_mode | stat.S_IEXEC)
70
71
72 def prompt_and_delete(path, no_input=False):
73 """
74 Ask user if it's okay to delete the previously-downloaded file/directory.
75
76 If yes, delete it. If no, checks to see if the old version should be
77 reused. If yes, it's reused; otherwise, Cookiecutter exits.
78
79 :param path: Previously downloaded zipfile.
80 :param no_input: Suppress prompt to delete repo and just delete it.
81 :return: True if the content was deleted
82 """
83 # Suppress prompt if called via API
84 if no_input:
85 ok_to_delete = True
86 else:
87 question = (
88 "You've downloaded {} before. Is it okay to delete and re-download it?"
89 ).format(path)
90
91 ok_to_delete = read_user_yes_no(question, 'yes')
92
93 if ok_to_delete:
94 if os.path.isdir(path):
95 rmtree(path)
96 else:
97 os.remove(path)
98 return True
99 else:
100 ok_to_reuse = read_user_yes_no(
101 "Do you want to re-use the existing version?", 'yes'
102 )
103
104 if ok_to_reuse:
105 return False
106
107 sys.exit()
108
[end of cookiecutter/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cookiecutter/utils.py b/cookiecutter/utils.py
--- a/cookiecutter/utils.py
+++ b/cookiecutter/utils.py
@@ -16,7 +16,7 @@
"""Error handler for `shutil.rmtree()` equivalent to `rm -rf`.
Usage: `shutil.rmtree(path, onerror=force_delete)`
- From stackoverflow.com/questions/1889597
+ From https://docs.python.org/3/library/shutil.html#rmtree-example
"""
os.chmod(path, stat.S_IWRITE)
func(path)
| {"golden_diff": "diff --git a/cookiecutter/utils.py b/cookiecutter/utils.py\n--- a/cookiecutter/utils.py\n+++ b/cookiecutter/utils.py\n@@ -16,7 +16,7 @@\n \"\"\"Error handler for `shutil.rmtree()` equivalent to `rm -rf`.\n \n Usage: `shutil.rmtree(path, onerror=force_delete)`\n- From stackoverflow.com/questions/1889597\n+ From https://docs.python.org/3/library/shutil.html#rmtree-example\n \"\"\"\n os.chmod(path, stat.S_IWRITE)\n func(path)\n", "issue": "Enforce the minimal coverage to 100%\n\n", "before_files": [{"content": "\"\"\"Helper functions used throughout Cookiecutter.\"\"\"\nimport contextlib\nimport errno\nimport logging\nimport os\nimport shutil\nimport stat\nimport sys\n\nfrom cookiecutter.prompt import read_user_yes_no\n\nlogger = logging.getLogger(__name__)\n\n\ndef force_delete(func, path, exc_info):\n \"\"\"Error handler for `shutil.rmtree()` equivalent to `rm -rf`.\n\n Usage: `shutil.rmtree(path, onerror=force_delete)`\n From stackoverflow.com/questions/1889597\n \"\"\"\n os.chmod(path, stat.S_IWRITE)\n func(path)\n\n\ndef rmtree(path):\n \"\"\"Remove a directory and all its contents. Like rm -rf on Unix.\n\n :param path: A directory path.\n \"\"\"\n shutil.rmtree(path, onerror=force_delete)\n\n\ndef make_sure_path_exists(path):\n \"\"\"Ensure that a directory exists.\n\n :param path: A directory path.\n \"\"\"\n logger.debug('Making sure path exists: %s', path)\n try:\n os.makedirs(path)\n logger.debug('Created directory at: %s', path)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n return False\n return True\n\n\[email protected]\ndef work_in(dirname=None):\n \"\"\"Context manager version of os.chdir.\n\n When exited, returns to the working directory prior to entering.\n \"\"\"\n curdir = os.getcwd()\n try:\n if dirname is not None:\n os.chdir(dirname)\n yield\n finally:\n os.chdir(curdir)\n\n\ndef make_executable(script_path):\n \"\"\"Make `script_path` executable.\n\n :param script_path: The file to change\n \"\"\"\n status = os.stat(script_path)\n os.chmod(script_path, status.st_mode | stat.S_IEXEC)\n\n\ndef prompt_and_delete(path, no_input=False):\n \"\"\"\n Ask user if it's okay to delete the previously-downloaded file/directory.\n\n If yes, delete it. If no, checks to see if the old version should be\n reused. If yes, it's reused; otherwise, Cookiecutter exits.\n\n :param path: Previously downloaded zipfile.\n :param no_input: Suppress prompt to delete repo and just delete it.\n :return: True if the content was deleted\n \"\"\"\n # Suppress prompt if called via API\n if no_input:\n ok_to_delete = True\n else:\n question = (\n \"You've downloaded {} before. Is it okay to delete and re-download it?\"\n ).format(path)\n\n ok_to_delete = read_user_yes_no(question, 'yes')\n\n if ok_to_delete:\n if os.path.isdir(path):\n rmtree(path)\n else:\n os.remove(path)\n return True\n else:\n ok_to_reuse = read_user_yes_no(\n \"Do you want to re-use the existing version?\", 'yes'\n )\n\n if ok_to_reuse:\n return False\n\n sys.exit()\n", "path": "cookiecutter/utils.py"}]} | 1,409 | 133 |
gh_patches_debug_35096 | rasdani/github-patches | git_diff | kornia__kornia-1687 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
kornia.augmentation.resize does not have antialias flag, unlike kornia.geometry.transform.resize
### Describe the bug
Check
https://kornia.readthedocs.io/en/latest/_modules/kornia/augmentation/_2d/geometric/resize.html#Resize
versus
https://kornia.readthedocs.io/en/latest/geometry.transform.html#kornia.geometry.transform.Resize
</issue>
<code>
[start of kornia/augmentation/_2d/geometric/resize.py]
1 from typing import Dict, Optional, Tuple, Union, cast
2
3 import torch
4 from torch import Tensor
5
6 from kornia.augmentation import random_generator as rg
7 from kornia.augmentation._2d.geometric.base import GeometricAugmentationBase2D
8 from kornia.constants import Resample
9 from kornia.geometry.transform import crop_by_transform_mat, get_perspective_transform, resize
10 from kornia.utils import eye_like
11
12
13 class Resize(GeometricAugmentationBase2D):
14 """Resize to size.
15
16 Args:
17 size: Size (h, w) in pixels of the resized region or just one side.
18 side: Which side to resize, if size is only of type int.
19 resample: Resampling mode.
20 align_corners: interpolation flag.
21 keepdim: whether to keep the output shape the same as input (True) or broadcast it
22 to the batch form (False).
23 """
24
25 def __init__(
26 self,
27 size: Union[int, Tuple[int, int]],
28 side: str = "short",
29 resample: Union[str, int, Resample] = Resample.BILINEAR.name,
30 align_corners: bool = True,
31 p: float = 1.0,
32 return_transform: Optional[bool] = None,
33 keepdim: bool = False,
34 ) -> None:
35 super().__init__(p=1., return_transform=return_transform, same_on_batch=True, p_batch=p, keepdim=keepdim)
36 self._param_generator = cast(rg.ResizeGenerator, rg.ResizeGenerator(resize_to=size, side=side))
37 self.flags = dict(size=size, side=side, resample=Resample.get(resample), align_corners=align_corners)
38
39 def compute_transformation(self, input: Tensor, params: Dict[str, Tensor]) -> Tensor:
40 if params["output_size"] == input.shape[-2:]:
41 return eye_like(3, input)
42
43 transform: Tensor = get_perspective_transform(params["src"], params["dst"])
44 transform = transform.expand(input.shape[0], -1, -1)
45 return transform
46
47 def apply_transform(
48 self, input: Tensor, params: Dict[str, Tensor], transform: Optional[Tensor] = None
49 ) -> Tensor:
50 B, C, _, _ = input.shape
51 out_size = tuple(params["output_size"][0].tolist())
52 out = torch.empty(B, C, *out_size, device=input.device, dtype=input.dtype)
53 for i in range(B):
54 x1 = int(params["src"][i, 0, 0])
55 x2 = int(params["src"][i, 1, 0]) + 1
56 y1 = int(params["src"][i, 0, 1])
57 y2 = int(params["src"][i, 3, 1]) + 1
58 out[i] = resize(
59 input[i : i + 1, :, y1:y2, x1:x2],
60 out_size,
61 interpolation=(self.flags["resample"].name).lower(),
62 align_corners=self.flags["align_corners"],
63 )
64 return out
65
66 def inverse_transform(
67 self,
68 input: Tensor,
69 transform: Optional[Tensor] = None,
70 size: Optional[Tuple[int, int]] = None,
71 **kwargs,
72 ) -> Tensor:
73 size = cast(Tuple[int, int], size)
74 mode = self.flags["resample"].name.lower() if "mode" not in kwargs else kwargs["mode"]
75 align_corners = self.flags["align_corners"] if "align_corners" not in kwargs else kwargs["align_corners"]
76 padding_mode = "zeros" if "padding_mode" not in kwargs else kwargs["padding_mode"]
77 transform = cast(Tensor, transform)
78 return crop_by_transform_mat(input, transform[:, :2, :], size, mode, padding_mode, align_corners)
79
80
81 class LongestMaxSize(Resize):
82 """Rescale an image so that maximum side is equal to max_size, keeping the aspect ratio of the initial image.
83
84 Args:
85 max_size: maximum size of the image after the transformation.
86 """
87
88 def __init__(
89 self,
90 max_size: int,
91 resample: Union[str, int, Resample] = Resample.BILINEAR.name,
92 align_corners: bool = True,
93 p: float = 1.0,
94 return_transform: Optional[bool] = None,
95 ) -> None:
96 # TODO: Support max_size list input to randomly select from
97 super().__init__(
98 size=max_size,
99 side="long",
100 resample=resample,
101 return_transform=return_transform,
102 align_corners=align_corners,
103 p=p,
104 )
105
106
107 class SmallestMaxSize(Resize):
108 """Rescale an image so that minimum side is equal to max_size, keeping the aspect ratio of the initial image.
109
110 Args:
111 max_size: maximum size of the image after the transformation.
112 """
113
114 def __init__(
115 self,
116 max_size: int,
117 resample: Union[str, int, Resample] = Resample.BILINEAR.name,
118 align_corners: bool = True,
119 p: float = 1.0,
120 return_transform: Optional[bool] = None,
121 ) -> None:
122 # TODO: Support max_size list input to randomly select from
123 super().__init__(
124 size=max_size,
125 side="short",
126 resample=resample,
127 return_transform=return_transform,
128 align_corners=align_corners,
129 p=p,
130 )
131
[end of kornia/augmentation/_2d/geometric/resize.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kornia/augmentation/_2d/geometric/resize.py b/kornia/augmentation/_2d/geometric/resize.py
--- a/kornia/augmentation/_2d/geometric/resize.py
+++ b/kornia/augmentation/_2d/geometric/resize.py
@@ -18,6 +18,7 @@
side: Which side to resize, if size is only of type int.
resample: Resampling mode.
align_corners: interpolation flag.
+ antialias: if True, then image will be filtered with Gaussian before downscaling. No effect for upscaling.
keepdim: whether to keep the output shape the same as input (True) or broadcast it
to the batch form (False).
"""
@@ -28,13 +29,20 @@
side: str = "short",
resample: Union[str, int, Resample] = Resample.BILINEAR.name,
align_corners: bool = True,
+ antialias: bool = False,
p: float = 1.0,
return_transform: Optional[bool] = None,
keepdim: bool = False,
) -> None:
super().__init__(p=1., return_transform=return_transform, same_on_batch=True, p_batch=p, keepdim=keepdim)
self._param_generator = cast(rg.ResizeGenerator, rg.ResizeGenerator(resize_to=size, side=side))
- self.flags = dict(size=size, side=side, resample=Resample.get(resample), align_corners=align_corners)
+ self.flags = dict(
+ size=size,
+ side=side,
+ resample=Resample.get(resample),
+ align_corners=align_corners,
+ antialias=antialias
+ )
def compute_transformation(self, input: Tensor, params: Dict[str, Tensor]) -> Tensor:
if params["output_size"] == input.shape[-2:]:
@@ -60,6 +68,7 @@
out_size,
interpolation=(self.flags["resample"].name).lower(),
align_corners=self.flags["align_corners"],
+ antialias=self.flags["antialias"]
)
return out
| {"golden_diff": "diff --git a/kornia/augmentation/_2d/geometric/resize.py b/kornia/augmentation/_2d/geometric/resize.py\n--- a/kornia/augmentation/_2d/geometric/resize.py\n+++ b/kornia/augmentation/_2d/geometric/resize.py\n@@ -18,6 +18,7 @@\n side: Which side to resize, if size is only of type int.\n resample: Resampling mode.\n align_corners: interpolation flag.\n+ antialias: if True, then image will be filtered with Gaussian before downscaling. No effect for upscaling.\n keepdim: whether to keep the output shape the same as input (True) or broadcast it\n to the batch form (False).\n \"\"\"\n@@ -28,13 +29,20 @@\n side: str = \"short\",\n resample: Union[str, int, Resample] = Resample.BILINEAR.name,\n align_corners: bool = True,\n+ antialias: bool = False,\n p: float = 1.0,\n return_transform: Optional[bool] = None,\n keepdim: bool = False,\n ) -> None:\n super().__init__(p=1., return_transform=return_transform, same_on_batch=True, p_batch=p, keepdim=keepdim)\n self._param_generator = cast(rg.ResizeGenerator, rg.ResizeGenerator(resize_to=size, side=side))\n- self.flags = dict(size=size, side=side, resample=Resample.get(resample), align_corners=align_corners)\n+ self.flags = dict(\n+ size=size,\n+ side=side,\n+ resample=Resample.get(resample),\n+ align_corners=align_corners,\n+ antialias=antialias\n+ )\n \n def compute_transformation(self, input: Tensor, params: Dict[str, Tensor]) -> Tensor:\n if params[\"output_size\"] == input.shape[-2:]:\n@@ -60,6 +68,7 @@\n out_size,\n interpolation=(self.flags[\"resample\"].name).lower(),\n align_corners=self.flags[\"align_corners\"],\n+ antialias=self.flags[\"antialias\"]\n )\n return out\n", "issue": "kornia.augmentation.resize does not have antialias flag, unlike kornia.geometry.transform.resize\n### Describe the bug\r\n\r\nCheck \r\nhttps://kornia.readthedocs.io/en/latest/_modules/kornia/augmentation/_2d/geometric/resize.html#Resize\r\n\r\nversus \r\nhttps://kornia.readthedocs.io/en/latest/geometry.transform.html#kornia.geometry.transform.Resize\n", "before_files": [{"content": "from typing import Dict, Optional, Tuple, Union, cast\n\nimport torch\nfrom torch import Tensor\n\nfrom kornia.augmentation import random_generator as rg\nfrom kornia.augmentation._2d.geometric.base import GeometricAugmentationBase2D\nfrom kornia.constants import Resample\nfrom kornia.geometry.transform import crop_by_transform_mat, get_perspective_transform, resize\nfrom kornia.utils import eye_like\n\n\nclass Resize(GeometricAugmentationBase2D):\n \"\"\"Resize to size.\n\n Args:\n size: Size (h, w) in pixels of the resized region or just one side.\n side: Which side to resize, if size is only of type int.\n resample: Resampling mode.\n align_corners: interpolation flag.\n keepdim: whether to keep the output shape the same as input (True) or broadcast it\n to the batch form (False).\n \"\"\"\n\n def __init__(\n self,\n size: Union[int, Tuple[int, int]],\n side: str = \"short\",\n resample: Union[str, int, Resample] = Resample.BILINEAR.name,\n align_corners: bool = True,\n p: float = 1.0,\n return_transform: Optional[bool] = None,\n keepdim: bool = False,\n ) -> None:\n super().__init__(p=1., return_transform=return_transform, same_on_batch=True, p_batch=p, keepdim=keepdim)\n self._param_generator = cast(rg.ResizeGenerator, rg.ResizeGenerator(resize_to=size, side=side))\n self.flags = dict(size=size, side=side, resample=Resample.get(resample), align_corners=align_corners)\n\n def compute_transformation(self, input: Tensor, params: Dict[str, Tensor]) -> Tensor:\n if 
params[\"output_size\"] == input.shape[-2:]:\n return eye_like(3, input)\n\n transform: Tensor = get_perspective_transform(params[\"src\"], params[\"dst\"])\n transform = transform.expand(input.shape[0], -1, -1)\n return transform\n\n def apply_transform(\n self, input: Tensor, params: Dict[str, Tensor], transform: Optional[Tensor] = None\n ) -> Tensor:\n B, C, _, _ = input.shape\n out_size = tuple(params[\"output_size\"][0].tolist())\n out = torch.empty(B, C, *out_size, device=input.device, dtype=input.dtype)\n for i in range(B):\n x1 = int(params[\"src\"][i, 0, 0])\n x2 = int(params[\"src\"][i, 1, 0]) + 1\n y1 = int(params[\"src\"][i, 0, 1])\n y2 = int(params[\"src\"][i, 3, 1]) + 1\n out[i] = resize(\n input[i : i + 1, :, y1:y2, x1:x2],\n out_size,\n interpolation=(self.flags[\"resample\"].name).lower(),\n align_corners=self.flags[\"align_corners\"],\n )\n return out\n\n def inverse_transform(\n self,\n input: Tensor,\n transform: Optional[Tensor] = None,\n size: Optional[Tuple[int, int]] = None,\n **kwargs,\n ) -> Tensor:\n size = cast(Tuple[int, int], size)\n mode = self.flags[\"resample\"].name.lower() if \"mode\" not in kwargs else kwargs[\"mode\"]\n align_corners = self.flags[\"align_corners\"] if \"align_corners\" not in kwargs else kwargs[\"align_corners\"]\n padding_mode = \"zeros\" if \"padding_mode\" not in kwargs else kwargs[\"padding_mode\"]\n transform = cast(Tensor, transform)\n return crop_by_transform_mat(input, transform[:, :2, :], size, mode, padding_mode, align_corners)\n\n\nclass LongestMaxSize(Resize):\n \"\"\"Rescale an image so that maximum side is equal to max_size, keeping the aspect ratio of the initial image.\n\n Args:\n max_size: maximum size of the image after the transformation.\n \"\"\"\n\n def __init__(\n self,\n max_size: int,\n resample: Union[str, int, Resample] = Resample.BILINEAR.name,\n align_corners: bool = True,\n p: float = 1.0,\n return_transform: Optional[bool] = None,\n ) -> None:\n # TODO: Support max_size list input to randomly select from\n super().__init__(\n size=max_size,\n side=\"long\",\n resample=resample,\n return_transform=return_transform,\n align_corners=align_corners,\n p=p,\n )\n\n\nclass SmallestMaxSize(Resize):\n \"\"\"Rescale an image so that minimum side is equal to max_size, keeping the aspect ratio of the initial image.\n\n Args:\n max_size: maximum size of the image after the transformation.\n \"\"\"\n\n def __init__(\n self,\n max_size: int,\n resample: Union[str, int, Resample] = Resample.BILINEAR.name,\n align_corners: bool = True,\n p: float = 1.0,\n return_transform: Optional[bool] = None,\n ) -> None:\n # TODO: Support max_size list input to randomly select from\n super().__init__(\n size=max_size,\n side=\"short\",\n resample=resample,\n return_transform=return_transform,\n align_corners=align_corners,\n p=p,\n )\n", "path": "kornia/augmentation/_2d/geometric/resize.py"}]} | 2,114 | 479 |
gh_patches_debug_1356 | rasdani/github-patches | git_diff | kserve__kserve-2103 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cannot install required version of numpy on M1 mac
/kind bug
Issue:
Installing the v0.8.0 release candidate on Python 3.8 or 3.9 (and presumably all versions of Python) fails due to the pinned numpy requirement.
Expected behavior:
kserve's release candidate for 0.8 can be installed on an M1 mac.
Extra information:
https://github.com/numpy/numpy/releases/tag/v1.21.0 numpy 1.21+ allows installation on M1 macs.
**Environment:**
- OS (e.g. from `/etc/os-release`): M1 mac
</issue>
<code>
[start of python/lgbserver/setup.py]
1 # Copyright 2021 The KServe Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from setuptools import setup, find_packages
16
17 tests_require = [
18 'pytest',
19 'pytest-asyncio',
20 'pytest-tornasync',
21 'mypy'
22 ]
23
24 setup(
25 name='lgbserver',
26 version='0.7.0',
27 author_email='[email protected]',
28 license='../../LICENSE.txt',
29 url='https://github.com/kserve/kserve/python/lgbserver',
30 description='Model Server implementation for LightGBM. \
31 Not intended for use outside KServe Frameworks Images',
32 long_description=open('README.md').read(),
33 python_requires='>3.4',
34 packages=find_packages("lgbserver"),
35 install_requires=[
36 "kserve>=0.7.0",
37 "lightgbm == 3.3.2",
38 "pandas == 0.25.3",
39 "argparse >= 1.4.0",
40 ],
41 tests_require=tests_require,
42 extras_require={'test': tests_require}
43 )
44
[end of python/lgbserver/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/python/lgbserver/setup.py b/python/lgbserver/setup.py
--- a/python/lgbserver/setup.py
+++ b/python/lgbserver/setup.py
@@ -35,7 +35,7 @@
install_requires=[
"kserve>=0.7.0",
"lightgbm == 3.3.2",
- "pandas == 0.25.3",
+ "pandas == 1.3.5",
"argparse >= 1.4.0",
],
tests_require=tests_require,
| {"golden_diff": "diff --git a/python/lgbserver/setup.py b/python/lgbserver/setup.py\n--- a/python/lgbserver/setup.py\n+++ b/python/lgbserver/setup.py\n@@ -35,7 +35,7 @@\n install_requires=[\n \"kserve>=0.7.0\",\n \"lightgbm == 3.3.2\",\n- \"pandas == 0.25.3\",\n+ \"pandas == 1.3.5\",\n \"argparse >= 1.4.0\",\n ],\n tests_require=tests_require,\n", "issue": "Cannot install required version of numpy on M1 mac\n/kind bug\r\n\r\nIssue:\r\nInstallation on python 3.8 or 3.9 (and presumably all versions of Python) of the v0.8.0 release candidate fails due to the pinned requirement of numpy.\r\n\r\nExpected behavior:\r\nkserve's release candidate for 0.8 can be installed on an M1 mac.\r\n\r\nExtra information:\r\nhttps://github.com/numpy/numpy/releases/tag/v1.21.0 numpy 1.21+ allows installation on M1 macs.\r\n\r\n\r\n**Environment:**\r\n\r\n- OS (e.g. from `/etc/os-release`): M1 mac\r\n\n", "before_files": [{"content": "# Copyright 2021 The KServe Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'pytest',\n 'pytest-asyncio',\n 'pytest-tornasync',\n 'mypy'\n]\n\nsetup(\n name='lgbserver',\n version='0.7.0',\n author_email='[email protected]',\n license='../../LICENSE.txt',\n url='https://github.com/kserve/kserve/python/lgbserver',\n description='Model Server implementation for LightGBM. \\\n Not intended for use outside KServe Frameworks Images',\n long_description=open('README.md').read(),\n python_requires='>3.4',\n packages=find_packages(\"lgbserver\"),\n install_requires=[\n \"kserve>=0.7.0\",\n \"lightgbm == 3.3.2\",\n \"pandas == 0.25.3\",\n \"argparse >= 1.4.0\",\n ],\n tests_require=tests_require,\n extras_require={'test': tests_require}\n)\n", "path": "python/lgbserver/setup.py"}]} | 1,100 | 124 |
gh_patches_debug_18073 | rasdani/github-patches | git_diff | kartoza__prj.app-895 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Display the version number in the footer

In the footer, next to "Available on github", we can display the version number from this file: https://github.com/kartoza/projecta/blob/develop/django_project/.version
This would let us know which version we are running on staging versus production.
Sentry is already reading this file: https://github.com/kartoza/projecta/blob/develop/django_project/core/settings/prod.py#L47
</issue>
<code>
[start of django_project/lesson/templatetags/lesson_tags.py]
1 # coding=utf-8
2 """Custom tags for lesson app."""
3
4 from django import template
5 from django.utils.safestring import mark_safe
6
7 register = template.Library()
8
9
10 @register.filter(name='is_translation_up_to_date')
11 def is_translation_up_to_date(value):
12 if not value.is_translation_up_to_date:
13 return mark_safe(
14 '<span title="Translation is outdated"><sup>❗</sup></span>')
15 else:
16 return mark_safe('')
17
[end of django_project/lesson/templatetags/lesson_tags.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/django_project/lesson/templatetags/lesson_tags.py b/django_project/lesson/templatetags/lesson_tags.py
--- a/django_project/lesson/templatetags/lesson_tags.py
+++ b/django_project/lesson/templatetags/lesson_tags.py
@@ -1,8 +1,9 @@
# coding=utf-8
"""Custom tags for lesson app."""
-
from django import template
from django.utils.safestring import mark_safe
+from core.settings.utils import absolute_path
+
register = template.Library()
@@ -14,3 +15,16 @@
'<span title="Translation is outdated"><sup>❗</sup></span>')
else:
return mark_safe('')
+
+
[email protected]_tag(takes_context=True)
+def version_tag(context):
+ """Reads current project release from the .version file."""
+ version_file = absolute_path('.version')
+ try:
+ with open(version_file, 'r') as file:
+ version = file.read()
+ context['version'] = version
+ except IOError:
+ context['version'] = 'Unknown'
+ return context['version']
| {"golden_diff": "diff --git a/django_project/lesson/templatetags/lesson_tags.py b/django_project/lesson/templatetags/lesson_tags.py\n--- a/django_project/lesson/templatetags/lesson_tags.py\n+++ b/django_project/lesson/templatetags/lesson_tags.py\n@@ -1,8 +1,9 @@\n # coding=utf-8\n \"\"\"Custom tags for lesson app.\"\"\"\n-\n from django import template\n from django.utils.safestring import mark_safe\n+from core.settings.utils import absolute_path\n+\n \n register = template.Library()\n \n@@ -14,3 +15,16 @@\n '<span title=\"Translation is outdated\"><sup>❗</sup></span>')\n else:\n return mark_safe('')\n+\n+\[email protected]_tag(takes_context=True)\n+def version_tag(context):\n+ \"\"\"Reads current project release from the .version file.\"\"\"\n+ version_file = absolute_path('.version')\n+ try:\n+ with open(version_file, 'r') as file:\n+ version = file.read()\n+ context['version'] = version\n+ except IOError:\n+ context['version'] = 'Unknown'\n+ return context['version']\n", "issue": "Display the version number in the footer\n\r\n\r\nIn the footer, next to \"Available on github\", we can display the version number from this file: https://github.com/kartoza/projecta/blob/develop/django_project/.version\r\n\r\nTo be able to know between staging and production which version we are running\r\n\r\nSentry is already reading this file: https://github.com/kartoza/projecta/blob/develop/django_project/core/settings/prod.py#L47\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"Custom tags for lesson app.\"\"\"\n\nfrom django import template\nfrom django.utils.safestring import mark_safe\n\nregister = template.Library()\n\n\[email protected](name='is_translation_up_to_date')\ndef is_translation_up_to_date(value):\n if not value.is_translation_up_to_date:\n return mark_safe(\n '<span title=\"Translation is outdated\"><sup>❗</sup></span>')\n else:\n return mark_safe('')\n", "path": "django_project/lesson/templatetags/lesson_tags.py"}]} | 855 | 264 |
gh_patches_debug_51512 | rasdani/github-patches | git_diff | pulp__pulpcore-210 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[noissue]: Update django-lifecycle requirement from <=1.1.2,>=1.0 to >=1.0,<=1.2.0
Updates the requirements on [django-lifecycle](https://github.com/rsinger86/django-lifecycle) to permit the latest version.
<details>
<summary>Release notes</summary>
<p><em>Sourced from <a href="https://github.com/rsinger86/django-lifecycle/releases">django-lifecycle's releases</a>.</em></p>
<blockquote>
<h2>1.2.0</h2>
<h2>What's Changed</h2>
<ul>
<li>Fix <code>has_changed</code> and <code>changed_to</code> when working with mutable data (i.e.: <code>dict</code>s) by <a href="https://github.com/AlaaNour94"><code>@AlaaNour94</code></a> in <a href="https://redirect.github.com/rsinger86/django-lifecycle/pull/151">rsinger86/django-lifecycle#151</a></li>
<li>Hook condition can be now built using some predefined conditions and/or with custom ones by <a href="https://github.com/EnriqueSoria"><code>@EnriqueSoria</code></a> in <a href="https://redirect.github.com/rsinger86/django-lifecycle/pull/150">rsinger86/django-lifecycle#150</a></li>
</ul>
<h2>New Contributors</h2>
<ul>
<li><a href="https://github.com/AlaaNour94"><code>@AlaaNour94</code></a> made their first contribution in <a href="https://redirect.github.com/rsinger86/django-lifecycle/pull/151">rsinger86/django-lifecycle#151</a></li>
</ul>
<p><strong>Full Changelog</strong>: <a href="https://github.com/rsinger86/django-lifecycle/compare/1.1.2...1.2.0">https://github.com/rsinger86/django-lifecycle/compare/1.1.2...1.2.0</a></p>
</blockquote>
</details>
<details>
<summary>Changelog</summary>
<p><em>Sourced from <a href="https://github.com/rsinger86/django-lifecycle/blob/master/CHANGELOG.md">django-lifecycle's changelog</a>.</em></p>
<blockquote>
<h1>1.2.0 (February 2024)</h1>
<ul>
<li>Hook condition can be now built using some predefined conditions and/or with custom ones.</li>
<li>Fix <code>has_changed</code> and <code>changed_to</code> when working with mutable data (i.e.: <code>dict</code>s). Thanks <a href="https://github.com/AlaaNour94"><code>@AlaaNour94</code></a></li>
</ul>
<h1>1.1.2 (November 2023)</h1>
<ul>
<li>Fix: Hooks were failing if some watched field (those in <code>when=""</code> or <code>when_any=[...]</code>) was a <code>GenericForeignKey</code></li>
</ul>
<h2>1.1.1 (November 2023)</h2>
<ul>
<li>Fix: Include missing <code>django_lifecycle_checks</code> into python package</li>
</ul>
<h2>1.1.0 (November 2023)</h2>
<ul>
<li>Drop support for Django < 2.2.</li>
<li>Confirm support for Django 5.0. Thanks <a href="https://github.com/adamchainz"><code>@adamchainz</code></a>!</li>
<li>Remove urlman from required packages. Thanks <a href="https://github.com/DmytroLitvinov"><code>@DmytroLitvinov</code></a>!</li>
<li>Add an optional Django check to avoid errors by not inheriting from <code>LifecycleModelMixin</code> (or <code>LifecycleModel</code>)</li>
</ul>
<h2>1.0.2 (September 2023)</h2>
<ul>
<li>Correct package info to note that Django 4.0, 4.1, and 4.2 are supported.</li>
</ul>
<h2>1.0.1 (August 2023)</h2>
<ul>
<li>Initial state gets reset using <code>transaction.on_commit()</code>, fixing the <code>has_changed()</code> and <code>initial_value()</code> methods for on_commit hooks. Thanks <a href="https://github.com/alb3rto269"><code>@alb3rto269</code></a>!</li>
</ul>
<h2>1.0.0 (May 2022)</h2>
<ul>
<li>Drops Python 3.6 support</li>
<li>Adds <code>priority</code> hook kwarg to control the order in which hooked methods fire. Thanks <a href="https://github.com/EnriqueSoria"><code>@EnriqueSoria</code></a>!</li>
<li>Internal cleanup/refactoring. Thanks <a href="https://github.com/EnriqueSoria"><code>@EnriqueSoria</code></a>!</li>
</ul>
<h2>0.9.6 (February 2022)</h2>
<ul>
<li>Adds missing <code>packaging</code> to <code>install_requires</code>. Thanks <a href="https://github.com/mikedep333"><code>@mikedep333</code></a>!</li>
</ul>
<h2>0.9.5 (February 2022)</h2>
<ul>
<li>Makes the <code>has_changed</code>, <code>changes_to</code> conditions depend on whether the field in question was included in the SQL update/insert statement by checking
the <code>update_fields</code> argument passed to save.</li>
</ul>
<h2>0.9.4 (February 2022)</h2>
<ul>
<li>Adds optional <a href="https://github.com/hook"><code>@hook</code></a> <code>on_commit</code> argument for executing hooks when the database transaction is committed. Thanks <a href="https://github.com/amcclosky"><code>@amcclosky</code></a>!</li>
</ul>
<h2>0.9.3 (October 2021)</h2>
<!-- raw HTML omitted -->
</blockquote>
<p>... (truncated)</p>
</details>
<details>
<summary>Commits</summary>
<ul>
<li><a href="https://github.com/rsinger86/django-lifecycle/commit/4cc4c0b8f5688f87e520a949350519619311815a"><code>4cc4c0b</code></a> chore: Prepare release 1.2.0</li>
<li><a href="https://github.com/rsinger86/django-lifecycle/commit/2ba92cec9b54ec89a29cf8870b81acb7c9c41f05"><code>2ba92ce</code></a> Merge pull request <a href="https://redirect.github.com/rsinger86/django-lifecycle/issues/150">#150</a> from rsinger86/feature/generalize-conditions</li>
<li><a href="https://github.com/rsinger86/django-lifecycle/commit/b8b6336e19de4a6e5eb8ad6e52dda0257e57ce94"><code>b8b6336</code></a> refactor: Rename conditions as suggested by <a href="https://github.com/mateocpdev"><code>@mateocpdev</code></a></li>
<li><a href="https://github.com/rsinger86/django-lifecycle/commit/bca05776ee77cf61f16d4bc8fc145d76bc276b01"><code>bca0577</code></a> docs: Update docs to add new way to add conditions</li>
<li><a href="https://github.com/rsinger86/django-lifecycle/commit/f2c6b803c5d4445d00c07ffeec7e5c73f75d0f6a"><code>f2c6b80</code></a> feat: Allow to specify both condition or legacy params</li>
<li><a href="https://github.com/rsinger86/django-lifecycle/commit/b6581622b49a906b1aab6ae07c80a29d61e77d2d"><code>b658162</code></a> feat: Make conditions chainable through <code>&</code> and <code>|</code></li>
<li><a href="https://github.com/rsinger86/django-lifecycle/commit/6eb62e7ee5dbb8b2bca9c4c3f8eb0086765c4267"><code>6eb62e7</code></a> feat: Replicate conditions tests from LifecycleMixinTests using new conditions</li>
<li><a href="https://github.com/rsinger86/django-lifecycle/commit/1b2ccb86a583f729ff916f473562814c30b446aa"><code>1b2ccb8</code></a> Merge remote-tracking branch 'origin/master' into feature/generalize-conditions</li>
<li><a href="https://github.com/rsinger86/django-lifecycle/commit/2d1b46672b7f660012dbff08a7fe29f6d10094ed"><code>2d1b466</code></a> Merge pull request <a href="https://redirect.github.com/rsinger86/django-lifecycle/issues/151">#151</a> from AlaaNour94/capture_mutable_fields_changes</li>
<li><a href="https://github.com/rsinger86/django-lifecycle/commit/98b69493e3914299d67a7eb8b05146bcfe20ea53"><code>98b6949</code></a> capture mutable fields changes</li>
<li>Additional commits viewable in <a href="https://github.com/rsinger86/django-lifecycle/compare/1.0.0...1.2.0">compare view</a></li>
</ul>
</details>
<br />
Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
<details>
<summary>Dependabot commands and options</summary>
<br />
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot show <dependency name> ignore conditions` will show all of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
</details>
</issue>
<code>
[start of pulpcore/app/models/upload.py]
1 import hashlib
2 import os
3
4 from django.core.files.base import ContentFile
5 from django.db import models
6
7 from pulpcore.app.models import Model
8
9
10 class Upload(Model):
11 """
12 A chunked upload. Stores chunks until used to create an artifact, etc.
13
14 Fields:
15
16 file (models.FileField): The stored file.
17 size (models.BigIntegerField): The size of the file in bytes.
18 completed (models.DateTimeField): Time when the upload is committed
19 """
20
21 file = models.FileField(null=False, max_length=255)
22 size = models.BigIntegerField()
23 completed = models.DateTimeField(null=True)
24
25 def append(self, chunk, offset):
26 """
27 Append a chunk to an upload.
28
29 Args:
30 chunk (File): Binary file to append to the upload file.
31 offset (int): First byte position to write chunk to.
32 """
33 if not self.file:
34 self.file.save(os.path.join('upload', str(self.pk)), ContentFile(''))
35
36 with self.file.open(mode='r+b') as file:
37 file.seek(offset)
38 file.write(chunk.read())
39
40 self.chunks.create(offset=offset, size=len(chunk))
41
42 @property
43 def sha256(self, rehash=False):
44 if getattr(self, '_sha256', None) is None or rehash is True:
45 sha256 = hashlib.sha256()
46 with self.file.open(mode='rb') as file:
47 for chunk in file.chunks():
48 sha256.update(chunk)
49 self._sha256 = sha256.hexdigest()
50 return self._sha256
51
52
53 class UploadChunk(Model):
54 """
55 A chunk for an uploaded file.
56
57 Fields:
58
59 upload (models.ForeignKey): Upload this chunk belongs to.
60 offset (models.BigIntegerField): Start of the chunk in bytes.
61 size (models.BigIntegerField): Size of the chunk in bytes.
62 """
63
64 upload = models.ForeignKey(Upload, on_delete=models.CASCADE, related_name='chunks')
65 offset = models.BigIntegerField()
66 size = models.BigIntegerField()
67
[end of pulpcore/app/models/upload.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pulpcore/app/models/upload.py b/pulpcore/app/models/upload.py
--- a/pulpcore/app/models/upload.py
+++ b/pulpcore/app/models/upload.py
@@ -49,6 +49,17 @@
self._sha256 = sha256.hexdigest()
return self._sha256
+ def delete(self, *args, **kwargs):
+ """
+ Deletes Upload model and the file associated with the model
+
+ Args:
+ args (list): list of positional arguments for Model.delete()
+ kwargs (dict): dictionary of keyword arguments to pass to Model.delete()
+ """
+ super().delete(*args, **kwargs)
+ self.file.delete(save=False)
+
class UploadChunk(Model):
"""
| {"golden_diff": "diff --git a/pulpcore/app/models/upload.py b/pulpcore/app/models/upload.py\n--- a/pulpcore/app/models/upload.py\n+++ b/pulpcore/app/models/upload.py\n@@ -49,6 +49,17 @@\n self._sha256 = sha256.hexdigest()\n return self._sha256\n \n+ def delete(self, *args, **kwargs):\n+ \"\"\"\n+ Deletes Upload model and the file associated with the model\n+\n+ Args:\n+ args (list): list of positional arguments for Model.delete()\n+ kwargs (dict): dictionary of keyword arguments to pass to Model.delete()\n+ \"\"\"\n+ super().delete(*args, **kwargs)\n+ self.file.delete(save=False)\n+\n \n class UploadChunk(Model):\n \"\"\"\n", "issue": "[noissue]: Update django-lifecycle requirement from <=1.1.2,>=1.0 to >=1.0,<=1.2.0\nUpdates the requirements on [django-lifecycle](https://github.com/rsinger86/django-lifecycle) to permit the latest version.\n<details>\n<summary>Release notes</summary>\n<p><em>Sourced from <a href=\"https://github.com/rsinger86/django-lifecycle/releases\">django-lifecycle's releases</a>.</em></p>\n<blockquote>\n<h2>1.2.0</h2>\n<h2>What's Changed</h2>\n<ul>\n<li>Fix <code>has_changed</code> and <code>changed_to</code> when working with mutable data (i.e.: <code>dict</code>s) by <a href=\"https://github.com/AlaaNour94\"><code>@\u200bAlaaNour94</code></a> in <a href=\"https://redirect.github.com/rsinger86/django-lifecycle/pull/151\">rsinger86/django-lifecycle#151</a></li>\n<li>Hook condition can be now built using some predefined conditions and/or with custom ones by <a href=\"https://github.com/EnriqueSoria\"><code>@\u200bEnriqueSoria</code></a> in <a href=\"https://redirect.github.com/rsinger86/django-lifecycle/pull/150\">rsinger86/django-lifecycle#150</a></li>\n</ul>\n<h2>New Contributors</h2>\n<ul>\n<li><a href=\"https://github.com/AlaaNour94\"><code>@\u200bAlaaNour94</code></a> made their first contribution in <a href=\"https://redirect.github.com/rsinger86/django-lifecycle/pull/151\">rsinger86/django-lifecycle#151</a></li>\n</ul>\n<p><strong>Full Changelog</strong>: <a href=\"https://github.com/rsinger86/django-lifecycle/compare/1.1.2...1.2.0\">https://github.com/rsinger86/django-lifecycle/compare/1.1.2...1.2.0</a></p>\n</blockquote>\n</details>\n<details>\n<summary>Changelog</summary>\n<p><em>Sourced from <a href=\"https://github.com/rsinger86/django-lifecycle/blob/master/CHANGELOG.md\">django-lifecycle's changelog</a>.</em></p>\n<blockquote>\n<h1>1.2.0 (February 2024)</h1>\n<ul>\n<li>Hook condition can be now built using some predefined conditions and/or with custom ones.</li>\n<li>Fix <code>has_changed</code> and <code>changed_to</code> when working with mutable data (i.e.: <code>dict</code>s). Thanks <a href=\"https://github.com/AlaaNour94\"><code>@\u200bAlaaNour94</code></a></li>\n</ul>\n<h1>1.1.2 (November 2023)</h1>\n<ul>\n<li>Fix: Hooks were failing if some watched field (those in <code>when=""</code> or <code>when_any=[...]</code>) was a <code>GenericForeignKey</code></li>\n</ul>\n<h2>1.1.1 (November 2023)</h2>\n<ul>\n<li>Fix: Include missing <code>django_lifecycle_checks</code> into python package</li>\n</ul>\n<h2>1.1.0 (November 2023)</h2>\n<ul>\n<li>Drop support for Django < 2.2.</li>\n<li>Confirm support for Django 5.0. Thanks <a href=\"https://github.com/adamchainz\"><code>@\u200badamchainz</code></a>!</li>\n<li>Remove urlman from required packages. 
Thanks <a href=\"https://github.com/DmytroLitvinov\"><code>@\u200bDmytroLitvinov</code></a>!</li>\n<li>Add an optional Django check to avoid errors by not inheriting from <code>LifecycleModelMixin</code> (or <code>LifecycleModel</code>)</li>\n</ul>\n<h2>1.0.2 (September 2023)</h2>\n<ul>\n<li>Correct package info to note that Django 4.0, 4.1, and 4.2 are supported.</li>\n</ul>\n<h2>1.0.1 (August 2023)</h2>\n<ul>\n<li>Initial state gets reset using <code>transaction.on_commit()</code>, fixing the <code>has_changed()</code> and <code>initial_value()</code> methods for on_commit hooks. Thanks <a href=\"https://github.com/alb3rto269\"><code>@\u200balb3rto269</code></a>!</li>\n</ul>\n<h2>1.0.0 (May 2022)</h2>\n<ul>\n<li>Drops Python 3.6 support</li>\n<li>Adds <code>priority</code> hook kwarg to control the order in which hooked methods fire. Thanks <a href=\"https://github.com/EnriqueSoria\"><code>@\u200bEnriqueSoria</code></a>!</li>\n<li>Internal cleanup/refactoring. Thanks <a href=\"https://github.com/EnriqueSoria\"><code>@\u200bEnriqueSoria</code></a>!</li>\n</ul>\n<h2>0.9.6 (February 2022)</h2>\n<ul>\n<li>Adds missing <code>packaging</code> to <code>install_requires</code>. Thanks <a href=\"https://github.com/mikedep333\"><code>@\u200bmikedep333</code></a>!</li>\n</ul>\n<h2>0.9.5 (February 2022)</h2>\n<ul>\n<li>Makes the <code>has_changed</code>, <code>changes_to</code> conditions depend on whether the field in question was included in the SQL update/insert statement by checking\nthe <code>update_fields</code> argument passed to save.</li>\n</ul>\n<h2>0.9.4 (February 2022)</h2>\n<ul>\n<li>Adds optional <a href=\"https://github.com/hook\"><code>@\u200bhook</code></a> <code>on_commit</code> argument for executing hooks when the database transaction is committed. Thanks <a href=\"https://github.com/amcclosky\"><code>@\u200bamcclosky</code></a>!</li>\n</ul>\n<h2>0.9.3 (October 2021)</h2>\n<!-- raw HTML omitted -->\n</blockquote>\n<p>... 
(truncated)</p>\n</details>\n<details>\n<summary>Commits</summary>\n<ul>\n<li><a href=\"https://github.com/rsinger86/django-lifecycle/commit/4cc4c0b8f5688f87e520a949350519619311815a\"><code>4cc4c0b</code></a> chore: Prepare release 1.2.0</li>\n<li><a href=\"https://github.com/rsinger86/django-lifecycle/commit/2ba92cec9b54ec89a29cf8870b81acb7c9c41f05\"><code>2ba92ce</code></a> Merge pull request <a href=\"https://redirect.github.com/rsinger86/django-lifecycle/issues/150\">#150</a> from rsinger86/feature/generalize-conditions</li>\n<li><a href=\"https://github.com/rsinger86/django-lifecycle/commit/b8b6336e19de4a6e5eb8ad6e52dda0257e57ce94\"><code>b8b6336</code></a> refactor: Rename conditions as suggested by <a href=\"https://github.com/mateocpdev\"><code>@\u200bmateocpdev</code></a></li>\n<li><a href=\"https://github.com/rsinger86/django-lifecycle/commit/bca05776ee77cf61f16d4bc8fc145d76bc276b01\"><code>bca0577</code></a> docs: Update docs to add new way to add conditions</li>\n<li><a href=\"https://github.com/rsinger86/django-lifecycle/commit/f2c6b803c5d4445d00c07ffeec7e5c73f75d0f6a\"><code>f2c6b80</code></a> feat: Allow to specify both condition or legacy params</li>\n<li><a href=\"https://github.com/rsinger86/django-lifecycle/commit/b6581622b49a906b1aab6ae07c80a29d61e77d2d\"><code>b658162</code></a> feat: Make conditions chainable through <code>&</code> and <code>|</code></li>\n<li><a href=\"https://github.com/rsinger86/django-lifecycle/commit/6eb62e7ee5dbb8b2bca9c4c3f8eb0086765c4267\"><code>6eb62e7</code></a> feat: Replicate conditions tests from LifecycleMixinTests using new conditions</li>\n<li><a href=\"https://github.com/rsinger86/django-lifecycle/commit/1b2ccb86a583f729ff916f473562814c30b446aa\"><code>1b2ccb8</code></a> Merge remote-tracking branch 'origin/master' into feature/generalize-conditions</li>\n<li><a href=\"https://github.com/rsinger86/django-lifecycle/commit/2d1b46672b7f660012dbff08a7fe29f6d10094ed\"><code>2d1b466</code></a> Merge pull request <a href=\"https://redirect.github.com/rsinger86/django-lifecycle/issues/151\">#151</a> from AlaaNour94/capture_mutable_fields_changes</li>\n<li><a href=\"https://github.com/rsinger86/django-lifecycle/commit/98b69493e3914299d67a7eb8b05146bcfe20ea53\"><code>98b6949</code></a> capture mutable fields changes</li>\n<li>Additional commits viewable in <a href=\"https://github.com/rsinger86/django-lifecycle/compare/1.0.0...1.2.0\">compare view</a></li>\n</ul>\n</details>\n<br />\n\n\nDependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.\n\n[//]: # (dependabot-automerge-start)\n[//]: # (dependabot-automerge-end)\n\n---\n\n<details>\n<summary>Dependabot commands and options</summary>\n<br />\n\nYou can trigger Dependabot actions by commenting on this PR:\n- `@dependabot rebase` will rebase this PR\n- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it\n- `@dependabot merge` will merge this PR after your CI passes on it\n- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it\n- `@dependabot cancel merge` will cancel a previously requested merge and block automerging\n- `@dependabot reopen` will reopen this PR if it is closed\n- `@dependabot close` will close this PR and stop Dependabot recreating it. 
You can achieve the same result by closing it manually\n- `@dependabot show <dependency name> ignore conditions` will show all of the ignore conditions of the specified dependency\n- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)\n\n\n</details>\n", "before_files": [{"content": "import hashlib\nimport os\n\nfrom django.core.files.base import ContentFile\nfrom django.db import models\n\nfrom pulpcore.app.models import Model\n\n\nclass Upload(Model):\n \"\"\"\n A chunked upload. Stores chunks until used to create an artifact, etc.\n\n Fields:\n\n file (models.FileField): The stored file.\n size (models.BigIntegerField): The size of the file in bytes.\n completed (models.DateTimeField): Time when the upload is committed\n \"\"\"\n\n file = models.FileField(null=False, max_length=255)\n size = models.BigIntegerField()\n completed = models.DateTimeField(null=True)\n\n def append(self, chunk, offset):\n \"\"\"\n Append a chunk to an upload.\n\n Args:\n chunk (File): Binary file to append to the upload file.\n offset (int): First byte position to write chunk to.\n \"\"\"\n if not self.file:\n self.file.save(os.path.join('upload', str(self.pk)), ContentFile(''))\n\n with self.file.open(mode='r+b') as file:\n file.seek(offset)\n file.write(chunk.read())\n\n self.chunks.create(offset=offset, size=len(chunk))\n\n @property\n def sha256(self, rehash=False):\n if getattr(self, '_sha256', None) is None or rehash is True:\n sha256 = hashlib.sha256()\n with self.file.open(mode='rb') as file:\n for chunk in file.chunks():\n sha256.update(chunk)\n self._sha256 = sha256.hexdigest()\n return self._sha256\n\n\nclass UploadChunk(Model):\n \"\"\"\n A chunk for an uploaded file.\n\n Fields:\n\n upload (models.ForeignKey): Upload this chunk belongs to.\n offset (models.BigIntegerField): Start of the chunk in bytes.\n size (models.BigIntegerField): Size of the chunk in bytes.\n \"\"\"\n\n upload = models.ForeignKey(Upload, on_delete=models.CASCADE, related_name='chunks')\n offset = models.BigIntegerField()\n size = models.BigIntegerField()\n", "path": "pulpcore/app/models/upload.py"}]} | 4,013 | 171 |
gh_patches_debug_11024 | rasdani/github-patches | git_diff | keras-team__autokeras-873 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Autokeras1.0 task api error TypeError: __init__() got an unexpected keyword argument 'seed'
### Bug Description
I installed AutoKeras 1.0 from git on 12/20/2019. When I run the code below, I get errors. This is the code:
```
import autokeras as ak
from keras.datasets import mnist
from sklearn import exceptions
from sklearn.metrics import confusion_matrix
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(x_train.shape + (1,))
x_test = x_test.reshape(x_test.shape + (1,))
clf = ak.ImageClassifier(max_trials=100)
clf.fit(x_train, y_train,validation_split=0.2)
y = clf.predict(x_test, y_test)
```
errors:
```
Traceback (most recent call last):
File "D:/work/python_project/autokeras_test/autokerasv1.py", line 13, in <module>
clf.fit(x_train, y_train,validation_split=0.2)
File "D:\work\python_project\autokeras_test\venv\lib\site-packages\autokeras\task.py", line 116, in fit
**kwargs)
File "D:\work\python_project\autokeras_test\venv\lib\site-packages\autokeras\auto_model.py", line 199, in fit
**kwargs)
File "D:\work\python_project\autokeras_test\venv\lib\site-packages\autokeras\tuner.py", line 138, in search
super().search(callbacks=new_callbacks, **fit_kwargs)
File "D:\work\python_project\autokeras_test\venv\lib\site-packages\kerastuner\engine\base_tuner.py", line 122, in search
self.run_trial(trial, *fit_args, **fit_kwargs)
File "D:\work\python_project\autokeras_test\venv\lib\site-packages\autokeras\tuner.py", line 53, in run_trial
trial.hyperparameters)
File "D:\work\python_project\autokeras_test\venv\lib\site-packages\autokeras\hypermodel\graph.py", line 460, in build_graphs
plain_graph = self.hyper_build(hp)
File "D:\work\python_project\autokeras_test\venv\lib\site-packages\autokeras\hypermodel\graph.py", line 480, in hyper_build
outputs = old_block.build(hp, inputs=inputs)
File "D:\work\python_project\autokeras_test\venv\lib\site-packages\autokeras\hypermodel\base.py", line 117, in _build_wrapper
return self._build(hp, *args, **kwargs)
File "D:\work\python_project\autokeras_test\venv\lib\site-packages\autokeras\hypermodel\hyperblock.py", line 62, in build
seed=self.seed)(output_node)
File "D:\work\python_project\autokeras_test\venv\lib\site-packages\autokeras\hypermodel\preprocessor.py", line 577, in __init__
super().__init__(**kwargs)
File "D:\work\python_project\autokeras_test\venv\lib\site-packages\autokeras\hypermodel\base.py", line 100, in __init__
super().__init__(**kwargs)
TypeError: __init__() got an unexpected keyword argument 'seed'
```
### Setup Details
Include the details about the versions of:
- OS type and version: window 10
- Python: 3.6
- autokeras: 1.0
- scikit-learn:0.22
- numpy:1.7.14
- keras:2.3.1
- scipy:1.4.1
- tensorflow:2.0.0
- pytorch:1.1.0
</issue>
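
The chain of `super().__init__(**kwargs)` calls visible in the traceback is what surfaces the error: `seed` is forwarded upward until it reaches a base `__init__` that has no such parameter. Below is a minimal, standalone sketch of that pattern; the class names are placeholders, not the real autokeras classes, and the accompanying fix simply stops passing `seed` to the preprocessor call.

```python
# Placeholder classes that mimic the **kwargs-forwarding pattern from the traceback.
class Block:
    def __init__(self, name=None):      # no `seed` parameter anywhere up the chain
        self.name = name


class Preprocessor(Block):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)      # unknown keyword arguments fail here


class ImageAugmentation(Preprocessor):
    def __init__(self, percentage=0.25, **kwargs):
        super().__init__(**kwargs)
        self.percentage = percentage


ImageAugmentation(seed=42)              # TypeError: unexpected keyword argument 'seed'
```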
<code>
[start of autokeras/hypermodel/hyperblock.py]
1 from tensorflow.python.util import nest
2
3 from autokeras.hypermodel import base
4 from autokeras.hypermodel import block as block_module
5 from autokeras.hypermodel import node as node_module
6 from autokeras.hypermodel import preprocessor as preprocessor_module
7
8
9 class ImageBlock(base.HyperBlock):
10 """Block for image data.
11
12 The image blocks is a block choosing from ResNetBlock, XceptionBlock, ConvBlock,
13 which is controlled by a hyperparameter, 'block_type'.
14
15 # Arguments
16 block_type: String. 'resnet', 'xception', 'vanilla'. The type of HyperBlock
17 to use. If unspecified, it will be tuned automatically.
18 normalize: Boolean. Whether to channel-wise normalize the images.
19 If unspecified, it will be tuned automatically.
20 augment: Boolean. Whether to do image augmentation. If unspecified,
21 it will be tuned automatically.
22 """
23
24 def __init__(self,
25 block_type=None,
26 normalize=None,
27 augment=None,
28 seed=None,
29 **kwargs):
30 super().__init__(**kwargs)
31 self.block_type = block_type
32 self.normalize = normalize
33 self.augment = augment
34 self.seed = seed
35
36 def get_config(self):
37 config = super().get_config()
38 config.update({'block_type': self.block_type,
39 'normalize': self.normalize,
40 'augment': self.augment,
41 'seed': self.seed})
42 return config
43
44 def build(self, hp, inputs=None):
45 input_node = nest.flatten(inputs)[0]
46 output_node = input_node
47
48 block_type = self.block_type or hp.Choice('block_type',
49 ['resnet', 'xception', 'vanilla'],
50 default='vanilla')
51
52 normalize = self.normalize
53 if normalize is None:
54 normalize = hp.Choice('normalize', [True, False], default=True)
55 augment = self.augment
56 if augment is None:
57 augment = hp.Choice('augment', [True, False], default=False)
58 if normalize:
59 output_node = preprocessor_module.Normalization()(output_node)
60 if augment:
61 output_node = preprocessor_module.ImageAugmentation(
62 seed=self.seed)(output_node)
63 sub_block_name = self.name + '_' + block_type
64 if block_type == 'resnet':
65 output_node = block_module.ResNetBlock(name=sub_block_name)(output_node)
66 elif block_type == 'xception':
67 output_node = block_module.XceptionBlock(
68 name=sub_block_name)(output_node)
69 elif block_type == 'vanilla':
70 output_node = block_module.ConvBlock(name=sub_block_name)(output_node)
71 return output_node
72
73
74 class TextBlock(base.HyperBlock):
75 """Block for text data.
76
77 # Arguments
78 vectorizer: String. 'sequence' or 'ngram'. If it is 'sequence',
79 TextToIntSequence will be used. If it is 'ngram', TextToNgramVector will
80 be used. If unspecified, it will be tuned automatically.
81 pretraining: Boolean. Whether to use pretraining weights in the N-gram
82 vectorizer. If unspecified, it will be tuned automatically.
83 """
84
85 def __init__(self, vectorizer=None, pretraining=None, **kwargs):
86 super().__init__(**kwargs)
87 self.vectorizer = vectorizer
88 self.pretraining = pretraining
89
90 def get_config(self):
91 config = super().get_config()
92 config.update({'vectorizer': self.vectorizer,
93 'pretraining': self.pretraining})
94 return config
95
96 def build(self, hp, inputs=None):
97 input_node = nest.flatten(inputs)[0]
98 output_node = input_node
99 vectorizer = self.vectorizer or hp.Choice('vectorizer',
100 ['sequence', 'ngram'],
101 default='sequence')
102 if not isinstance(input_node, node_module.TextNode):
103 raise ValueError('The input_node should be a TextNode.')
104 if vectorizer == 'ngram':
105 output_node = preprocessor_module.TextToNgramVector()(output_node)
106 output_node = block_module.DenseBlock()(output_node)
107 else:
108 output_node = preprocessor_module.TextToIntSequence()(output_node)
109 output_node = block_module.EmbeddingBlock(
110 pretraining=self.pretraining)(output_node)
111 output_node = block_module.ConvBlock(separable=True)(output_node)
112 output_node = block_module.SpatialReduction()(output_node)
113 output_node = block_module.DenseBlock()(output_node)
114 return output_node
115
116
117 class StructuredDataBlock(base.HyperBlock):
118 """Block for structured data.
119
120 # Arguments
121 feature_engineering: Boolean. Whether to use feature engineering block.
122 Defaults to True. If specified as None, it will be tuned automatically.
123 module_type: String. 'dense' or 'lightgbm'. If it is 'dense', DenseBlock
124 will be used. If it is 'lightgbm', LightGBM will be used. If
125 unspecified, it will be tuned automatically.
126 seed: Int. Random seed.
127 """
128
129 def __init__(self,
130 feature_engineering=True,
131 module_type=None,
132 seed=None,
133 **kwargs):
134 super().__init__(**kwargs)
135 self.feature_engineering = feature_engineering
136 self.module_type = module_type
137 self.num_heads = None
138 self.seed = seed
139
140 def get_config(self):
141 config = super().get_config()
142 config.update({'feature_engineering': self.feature_engineering,
143 'module_type': self.module_type,
144 'seed': self.seed})
145 return config
146
147 def get_state(self):
148 state = super().get_state()
149 state.update({'num_heads': self.num_heads})
150 return state
151
152 def set_state(self, state):
153 super().set_state(state)
154 self.num_heads = state.get('num_heads')
155
156 def build_feature_engineering(self, hp, input_node):
157 output_node = input_node
158 feature_engineering = self.feature_engineering
159 if feature_engineering is None:
160 # TODO: If False, use plain label encoding.
161 feature_engineering = hp.Choice('feature_engineering',
162 [True],
163 default=True)
164 if feature_engineering:
165 output_node = preprocessor_module.FeatureEngineering()(output_node)
166 return output_node
167
168 def build_body(self, hp, input_node):
169 if self.num_heads > 1:
170 module_type_choices = ['dense']
171 else:
172 module_type_choices = ['lightgbm', 'dense']
173 module_type = self.module_type or hp.Choice('module_type',
174 module_type_choices,
175 default=module_type_choices[0])
176 if module_type == 'dense':
177 output_node = block_module.DenseBlock()(input_node)
178 elif module_type == 'lightgbm':
179 output_node = preprocessor_module.LightGBM(
180 seed=self.seed)(input_node)
181 else:
182 raise ValueError('Unsupported module'
183 'type: {module_type}'.format(
184 module_type=module_type))
185 nest.flatten(output_node)[0].shape = self.output_shape
186 return output_node
187
188 def build(self, hp, inputs=None):
189 input_node = nest.flatten(inputs)[0]
190 output_node = self.build_feature_engineering(hp, input_node)
191 output_node = self.build_body(hp, output_node)
192 return output_node
193
194
195 class TimeSeriesBlock(base.HyperBlock):
196
197 def build(self, hp, inputs=None):
198 raise NotImplementedError
199
200
201 class GeneralBlock(base.HyperBlock):
202 """A general neural network block when the input type is unknown.
203
204 When the input type is unknown. The GeneralBlock would search in a large space
205 for a good model.
206
207 # Arguments
208 name: String.
209 """
210
211 def build(self, hp, inputs=None):
212 raise NotImplementedError
213
[end of autokeras/hypermodel/hyperblock.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/autokeras/hypermodel/hyperblock.py b/autokeras/hypermodel/hyperblock.py
--- a/autokeras/hypermodel/hyperblock.py
+++ b/autokeras/hypermodel/hyperblock.py
@@ -58,8 +58,7 @@
if normalize:
output_node = preprocessor_module.Normalization()(output_node)
if augment:
- output_node = preprocessor_module.ImageAugmentation(
- seed=self.seed)(output_node)
+ output_node = preprocessor_module.ImageAugmentation()(output_node)
sub_block_name = self.name + '_' + block_type
if block_type == 'resnet':
output_node = block_module.ResNetBlock(name=sub_block_name)(output_node)
| {"golden_diff": "diff --git a/autokeras/hypermodel/hyperblock.py b/autokeras/hypermodel/hyperblock.py\n--- a/autokeras/hypermodel/hyperblock.py\n+++ b/autokeras/hypermodel/hyperblock.py\n@@ -58,8 +58,7 @@\n if normalize:\n output_node = preprocessor_module.Normalization()(output_node)\n if augment:\n- output_node = preprocessor_module.ImageAugmentation(\n- seed=self.seed)(output_node)\n+ output_node = preprocessor_module.ImageAugmentation()(output_node)\n sub_block_name = self.name + '_' + block_type\n if block_type == 'resnet':\n output_node = block_module.ResNetBlock(name=sub_block_name)(output_node)\n", "issue": "Autokeras1.0 task api error TypeError: __init__() got an unexpected keyword argument 'seed'\n### Bug Description\r\ni have installed autokeras1.0 from git at 12/20/2019,\r\nwhen i running the code bellow, i got errors\r\n\r\nthis is the code:\r\n```\r\nimport autokeras as ak\r\nfrom keras.datasets import mnist\r\nfrom sklearn import exceptions\r\nfrom sklearn.metrics import confusion_matrix\r\n\r\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\r\nx_train = x_train.reshape(x_train.shape + (1,))\r\nx_test = x_test.reshape(x_test.shape + (1,))\r\n\r\n\r\nclf = ak.ImageClassifier(max_trials=100)\r\nclf.fit(x_train, y_train,validation_split=0.2)\r\ny = clf.predict(x_test, y_test)\r\n```\r\n\r\nerros\r\n```\r\nTraceback (most recent call last):\r\n File \"D:/work/python_project/autokeras_test/autokerasv1.py\", line 13, in <module>\r\n clf.fit(x_train, y_train,validation_split=0.2)\r\n File \"D:\\work\\python_project\\autokeras_test\\venv\\lib\\site-packages\\autokeras\\task.py\", line 116, in fit\r\n **kwargs)\r\n File \"D:\\work\\python_project\\autokeras_test\\venv\\lib\\site-packages\\autokeras\\auto_model.py\", line 199, in fit\r\n **kwargs)\r\n File \"D:\\work\\python_project\\autokeras_test\\venv\\lib\\site-packages\\autokeras\\tuner.py\", line 138, in search\r\n super().search(callbacks=new_callbacks, **fit_kwargs)\r\n File \"D:\\work\\python_project\\autokeras_test\\venv\\lib\\site-packages\\kerastuner\\engine\\base_tuner.py\", line 122, in search\r\n self.run_trial(trial, *fit_args, **fit_kwargs)\r\n File \"D:\\work\\python_project\\autokeras_test\\venv\\lib\\site-packages\\autokeras\\tuner.py\", line 53, in run_trial\r\n trial.hyperparameters)\r\n File \"D:\\work\\python_project\\autokeras_test\\venv\\lib\\site-packages\\autokeras\\hypermodel\\graph.py\", line 460, in build_graphs\r\n plain_graph = self.hyper_build(hp)\r\n File \"D:\\work\\python_project\\autokeras_test\\venv\\lib\\site-packages\\autokeras\\hypermodel\\graph.py\", line 480, in hyper_build\r\n outputs = old_block.build(hp, inputs=inputs)\r\n File \"D:\\work\\python_project\\autokeras_test\\venv\\lib\\site-packages\\autokeras\\hypermodel\\base.py\", line 117, in _build_wrapper\r\n return self._build(hp, *args, **kwargs)\r\n File \"D:\\work\\python_project\\autokeras_test\\venv\\lib\\site-packages\\autokeras\\hypermodel\\hyperblock.py\", line 62, in build\r\n seed=self.seed)(output_node)\r\n File \"D:\\work\\python_project\\autokeras_test\\venv\\lib\\site-packages\\autokeras\\hypermodel\\preprocessor.py\", line 577, in __init__\r\n super().__init__(**kwargs)\r\n File \"D:\\work\\python_project\\autokeras_test\\venv\\lib\\site-packages\\autokeras\\hypermodel\\base.py\", line 100, in __init__\r\n super().__init__(**kwargs)\r\nTypeError: __init__() got an unexpected keyword argument 'seed'\r\n\r\n```\r\n\r\n### Setup Details\r\nInclude the details about the versions of:\r\n - OS type and version: 
window 10\r\n - Python: 3.6\r\n - autokeras: 1.0\r\n - scikit-learn:0.22\r\n - numpy:1.7.14\r\n - keras:2.3.1\r\n - scipy:1.4.1\r\n - tensorflow:2.0.0\r\n - pytorch:1.1.0\r\n\r\n\r\n\n", "before_files": [{"content": "from tensorflow.python.util import nest\n\nfrom autokeras.hypermodel import base\nfrom autokeras.hypermodel import block as block_module\nfrom autokeras.hypermodel import node as node_module\nfrom autokeras.hypermodel import preprocessor as preprocessor_module\n\n\nclass ImageBlock(base.HyperBlock):\n \"\"\"Block for image data.\n\n The image blocks is a block choosing from ResNetBlock, XceptionBlock, ConvBlock,\n which is controlled by a hyperparameter, 'block_type'.\n\n # Arguments\n block_type: String. 'resnet', 'xception', 'vanilla'. The type of HyperBlock\n to use. If unspecified, it will be tuned automatically.\n normalize: Boolean. Whether to channel-wise normalize the images.\n If unspecified, it will be tuned automatically.\n augment: Boolean. Whether to do image augmentation. If unspecified,\n it will be tuned automatically.\n \"\"\"\n\n def __init__(self,\n block_type=None,\n normalize=None,\n augment=None,\n seed=None,\n **kwargs):\n super().__init__(**kwargs)\n self.block_type = block_type\n self.normalize = normalize\n self.augment = augment\n self.seed = seed\n\n def get_config(self):\n config = super().get_config()\n config.update({'block_type': self.block_type,\n 'normalize': self.normalize,\n 'augment': self.augment,\n 'seed': self.seed})\n return config\n\n def build(self, hp, inputs=None):\n input_node = nest.flatten(inputs)[0]\n output_node = input_node\n\n block_type = self.block_type or hp.Choice('block_type',\n ['resnet', 'xception', 'vanilla'],\n default='vanilla')\n\n normalize = self.normalize\n if normalize is None:\n normalize = hp.Choice('normalize', [True, False], default=True)\n augment = self.augment\n if augment is None:\n augment = hp.Choice('augment', [True, False], default=False)\n if normalize:\n output_node = preprocessor_module.Normalization()(output_node)\n if augment:\n output_node = preprocessor_module.ImageAugmentation(\n seed=self.seed)(output_node)\n sub_block_name = self.name + '_' + block_type\n if block_type == 'resnet':\n output_node = block_module.ResNetBlock(name=sub_block_name)(output_node)\n elif block_type == 'xception':\n output_node = block_module.XceptionBlock(\n name=sub_block_name)(output_node)\n elif block_type == 'vanilla':\n output_node = block_module.ConvBlock(name=sub_block_name)(output_node)\n return output_node\n\n\nclass TextBlock(base.HyperBlock):\n \"\"\"Block for text data.\n\n # Arguments\n vectorizer: String. 'sequence' or 'ngram'. If it is 'sequence',\n TextToIntSequence will be used. If it is 'ngram', TextToNgramVector will\n be used. If unspecified, it will be tuned automatically.\n pretraining: Boolean. Whether to use pretraining weights in the N-gram\n vectorizer. 
If unspecified, it will be tuned automatically.\n \"\"\"\n\n def __init__(self, vectorizer=None, pretraining=None, **kwargs):\n super().__init__(**kwargs)\n self.vectorizer = vectorizer\n self.pretraining = pretraining\n\n def get_config(self):\n config = super().get_config()\n config.update({'vectorizer': self.vectorizer,\n 'pretraining': self.pretraining})\n return config\n\n def build(self, hp, inputs=None):\n input_node = nest.flatten(inputs)[0]\n output_node = input_node\n vectorizer = self.vectorizer or hp.Choice('vectorizer',\n ['sequence', 'ngram'],\n default='sequence')\n if not isinstance(input_node, node_module.TextNode):\n raise ValueError('The input_node should be a TextNode.')\n if vectorizer == 'ngram':\n output_node = preprocessor_module.TextToNgramVector()(output_node)\n output_node = block_module.DenseBlock()(output_node)\n else:\n output_node = preprocessor_module.TextToIntSequence()(output_node)\n output_node = block_module.EmbeddingBlock(\n pretraining=self.pretraining)(output_node)\n output_node = block_module.ConvBlock(separable=True)(output_node)\n output_node = block_module.SpatialReduction()(output_node)\n output_node = block_module.DenseBlock()(output_node)\n return output_node\n\n\nclass StructuredDataBlock(base.HyperBlock):\n \"\"\"Block for structured data.\n\n # Arguments\n feature_engineering: Boolean. Whether to use feature engineering block.\n Defaults to True. If specified as None, it will be tuned automatically.\n module_type: String. 'dense' or 'lightgbm'. If it is 'dense', DenseBlock\n will be used. If it is 'lightgbm', LightGBM will be used. If\n unspecified, it will be tuned automatically.\n seed: Int. Random seed.\n \"\"\"\n\n def __init__(self,\n feature_engineering=True,\n module_type=None,\n seed=None,\n **kwargs):\n super().__init__(**kwargs)\n self.feature_engineering = feature_engineering\n self.module_type = module_type\n self.num_heads = None\n self.seed = seed\n\n def get_config(self):\n config = super().get_config()\n config.update({'feature_engineering': self.feature_engineering,\n 'module_type': self.module_type,\n 'seed': self.seed})\n return config\n\n def get_state(self):\n state = super().get_state()\n state.update({'num_heads': self.num_heads})\n return state\n\n def set_state(self, state):\n super().set_state(state)\n self.num_heads = state.get('num_heads')\n\n def build_feature_engineering(self, hp, input_node):\n output_node = input_node\n feature_engineering = self.feature_engineering\n if feature_engineering is None:\n # TODO: If False, use plain label encoding.\n feature_engineering = hp.Choice('feature_engineering',\n [True],\n default=True)\n if feature_engineering:\n output_node = preprocessor_module.FeatureEngineering()(output_node)\n return output_node\n\n def build_body(self, hp, input_node):\n if self.num_heads > 1:\n module_type_choices = ['dense']\n else:\n module_type_choices = ['lightgbm', 'dense']\n module_type = self.module_type or hp.Choice('module_type',\n module_type_choices,\n default=module_type_choices[0])\n if module_type == 'dense':\n output_node = block_module.DenseBlock()(input_node)\n elif module_type == 'lightgbm':\n output_node = preprocessor_module.LightGBM(\n seed=self.seed)(input_node)\n else:\n raise ValueError('Unsupported module'\n 'type: {module_type}'.format(\n module_type=module_type))\n nest.flatten(output_node)[0].shape = self.output_shape\n return output_node\n\n def build(self, hp, inputs=None):\n input_node = nest.flatten(inputs)[0]\n output_node = self.build_feature_engineering(hp, 
input_node)\n output_node = self.build_body(hp, output_node)\n return output_node\n\n\nclass TimeSeriesBlock(base.HyperBlock):\n\n def build(self, hp, inputs=None):\n raise NotImplementedError\n\n\nclass GeneralBlock(base.HyperBlock):\n \"\"\"A general neural network block when the input type is unknown.\n\n When the input type is unknown. The GeneralBlock would search in a large space\n for a good model.\n\n # Arguments\n name: String.\n \"\"\"\n\n def build(self, hp, inputs=None):\n raise NotImplementedError\n", "path": "autokeras/hypermodel/hyperblock.py"}]} | 3,675 | 164 |
gh_patches_debug_1969 | rasdani/github-patches | git_diff | kserve__kserve-1053 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Tabular Explainer e2e test failing
/kind bug
```
(base) C02YJ034JGH5:~ dsun20$ kubectl logs isvc-explainer-tabular-explainer-default-7cnkj-deployment-4q4hn -n kfserving-ci-e2e-test kfserving-container
[I 200828 13:12:28 font_manager:1423] Generating new fontManager, this may take some time...
Traceback (most recent call last):
File "/usr/local/lib/python3.7/runpy.py", line 183, in _run_module_as_main
mod_name, mod_spec, code = _get_module_details(mod_name, _Error)
File "/usr/local/lib/python3.7/runpy.py", line 142, in _get_module_details
return _get_module_details(pkg_main_name, error)
File "/usr/local/lib/python3.7/runpy.py", line 109, in _get_module_details
__import__(pkg_name)
File "/alibiexplainer/alibiexplainer/__init__.py", line 15, in <module>
from .explainer import AlibiExplainer
File "/alibiexplainer/alibiexplainer/explainer.py", line 21, in <module>
from alibiexplainer.anchor_images import AnchorImages
File "/alibiexplainer/alibiexplainer/anchor_images.py", line 17, in <module>
import alibi
File "/usr/local/lib/python3.7/site-packages/alibi/__init__.py", line 1, in <module>
from . import confidence, datasets, explainers, utils
File "/usr/local/lib/python3.7/site-packages/alibi/explainers/__init__.py", line 11, in <module>
from .kernel_shap import KernelShap
File "/usr/local/lib/python3.7/site-packages/alibi/explainers/kernel_shap.py", line 11, in <module>
from shap.common import DenseData, DenseDataWithIndex
ModuleNotFoundError: No module named 'shap.common'
```
**What did you expect to happen:**
**Anything else you would like to add:**
[Miscellaneous information that will assist in solving the issue.]
**Environment:**
- Istio Version:
- Knative Version:
- KFServing Version:
- Kubeflow version:
- Kfdef:[k8s_istio/istio_dex/gcp_basic_auth/gcp_iap/aws/aws_cognito/ibm]
- Minikube version:
- Kubernetes version: (use `kubectl version`):
- OS (e.g. from `/etc/os-release`):
</issue>
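
The failure is a packaging problem rather than a bug in the explainer code: newer shap releases no longer ship the `shap.common` module that `alibi==0.4.0` imports from, so the container breaks as soon as alibi is imported. A small diagnostic sketch (plain Python, nothing project specific) for checking which situation an image is in:

```python
# Report the installed shap release and whether it still provides shap.common.
import shap

print("shap", shap.__version__)

try:
    from shap.common import DenseData, DenseDataWithIndex  # noqa: F401
except ModuleNotFoundError:
    print("shap.common is missing; pin an older shap (e.g. the 0.35 line)")
else:
    print("shap.common is present; alibi's kernel_shap import should work")
```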
<code>
[start of python/alibiexplainer/setup.py]
1 # Copyright 2019 kubeflow.org.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from setuptools import setup, find_packages
16
17 tests_require = [
18 'pytest',
19 'pytest-tornasync',
20 'mypy'
21 ]
22
23 setup(
24 name='alibiexplainer',
25 version='0.4.0',
26 author_email='[email protected]',
27 license='../../LICENSE.txt',
28 url='https://github.com/kubeflow/kfserving/python/kfserving/alibiexplainer',
29 description='Model Explaination Server. \
30 Not intended for use outside KFServing Frameworks Images',
31 long_description=open('README.md').read(),
32 python_requires='>=3.6',
33 packages=find_packages("alibiexplainer"),
34 install_requires=[
35 "kfserving>=0.4.0",
36 "alibi==0.4.0",
37 "scikit-learn>=0.20.3",
38 "argparse>=1.4.0",
39 "requests>=2.22.0",
40 "joblib>=0.13.2",
41 "pandas>=0.24.2",
42 "numpy>=1.16.3",
43 "dill>=0.3.0",
44 "spacy>=2.1.4"
45 ],
46 tests_require=tests_require,
47 extras_require={'test': tests_require}
48 )
49
[end of python/alibiexplainer/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/python/alibiexplainer/setup.py b/python/alibiexplainer/setup.py
--- a/python/alibiexplainer/setup.py
+++ b/python/alibiexplainer/setup.py
@@ -32,6 +32,7 @@
python_requires='>=3.6',
packages=find_packages("alibiexplainer"),
install_requires=[
+ "shap==0.35",
"kfserving>=0.4.0",
"alibi==0.4.0",
"scikit-learn>=0.20.3",
| {"golden_diff": "diff --git a/python/alibiexplainer/setup.py b/python/alibiexplainer/setup.py\n--- a/python/alibiexplainer/setup.py\n+++ b/python/alibiexplainer/setup.py\n@@ -32,6 +32,7 @@\n python_requires='>=3.6',\n packages=find_packages(\"alibiexplainer\"),\n install_requires=[\n+ \"shap==0.35\",\n \"kfserving>=0.4.0\",\n \"alibi==0.4.0\",\n \"scikit-learn>=0.20.3\",\n", "issue": "Tabular Explainer e2e test failing\n/kind bug\r\n\r\n```\r\n(base) C02YJ034JGH5:~ dsun20$ kubectl logs isvc-explainer-tabular-explainer-default-7cnkj-deployment-4q4hn -n kfserving-ci-e2e-test kfserving-container\r\n[I 200828 13:12:28 font_manager:1423] Generating new fontManager, this may take some time...\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.7/runpy.py\", line 183, in _run_module_as_main\r\n mod_name, mod_spec, code = _get_module_details(mod_name, _Error)\r\n File \"/usr/local/lib/python3.7/runpy.py\", line 142, in _get_module_details\r\n return _get_module_details(pkg_main_name, error)\r\n File \"/usr/local/lib/python3.7/runpy.py\", line 109, in _get_module_details\r\n __import__(pkg_name)\r\n File \"/alibiexplainer/alibiexplainer/__init__.py\", line 15, in <module>\r\n from .explainer import AlibiExplainer\r\n File \"/alibiexplainer/alibiexplainer/explainer.py\", line 21, in <module>\r\n from alibiexplainer.anchor_images import AnchorImages\r\n File \"/alibiexplainer/alibiexplainer/anchor_images.py\", line 17, in <module>\r\n import alibi\r\n File \"/usr/local/lib/python3.7/site-packages/alibi/__init__.py\", line 1, in <module>\r\n from . import confidence, datasets, explainers, utils\r\n File \"/usr/local/lib/python3.7/site-packages/alibi/explainers/__init__.py\", line 11, in <module>\r\n from .kernel_shap import KernelShap\r\n File \"/usr/local/lib/python3.7/site-packages/alibi/explainers/kernel_shap.py\", line 11, in <module>\r\n from shap.common import DenseData, DenseDataWithIndex\r\nModuleNotFoundError: No module named 'shap.common'\r\n```\r\n\r\n\r\n**What did you expect to happen:**\r\n\r\n\r\n**Anything else you would like to add:**\r\n[Miscellaneous information that will assist in solving the issue.]\r\n\r\n\r\n**Environment:**\r\n\r\n- Istio Version:\r\n- Knative Version:\r\n- KFServing Version:\r\n- Kubeflow version:\r\n- Kfdef:[k8s_istio/istio_dex/gcp_basic_auth/gcp_iap/aws/aws_cognito/ibm]\r\n- Minikube version:\r\n- Kubernetes version: (use `kubectl version`):\r\n- OS (e.g. from `/etc/os-release`):\r\n\n", "before_files": [{"content": "# Copyright 2019 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'pytest',\n 'pytest-tornasync',\n 'mypy'\n]\n\nsetup(\n name='alibiexplainer',\n version='0.4.0',\n author_email='[email protected]',\n license='../../LICENSE.txt',\n url='https://github.com/kubeflow/kfserving/python/kfserving/alibiexplainer',\n description='Model Explaination Server. 
\\\n Not intended for use outside KFServing Frameworks Images',\n long_description=open('README.md').read(),\n python_requires='>=3.6',\n packages=find_packages(\"alibiexplainer\"),\n install_requires=[\n \"kfserving>=0.4.0\",\n \"alibi==0.4.0\",\n \"scikit-learn>=0.20.3\",\n \"argparse>=1.4.0\",\n \"requests>=2.22.0\",\n \"joblib>=0.13.2\",\n \"pandas>=0.24.2\",\n \"numpy>=1.16.3\",\n \"dill>=0.3.0\",\n \"spacy>=2.1.4\"\n ],\n tests_require=tests_require,\n extras_require={'test': tests_require}\n)\n", "path": "python/alibiexplainer/setup.py"}]} | 1,643 | 123 |
gh_patches_debug_2597 | rasdani/github-patches | git_diff | cookiecutter__cookiecutter-539 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Increase development status to 'beta' or 'stable'.
I think we can say the project is waaaay beyond alpha. :wink:
</issue>
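
For reference, the "Development Status" trove classifiers recognised by PyPI form a fixed scale, so "beta" and "stable" correspond to specific strings:

```python
# The seven Development Status classifiers; the setup.py below currently declares the third.
DEVELOPMENT_STATUS = [
    "Development Status :: 1 - Planning",
    "Development Status :: 2 - Pre-Alpha",
    "Development Status :: 3 - Alpha",
    "Development Status :: 4 - Beta",
    "Development Status :: 5 - Production/Stable",
    "Development Status :: 6 - Mature",
    "Development Status :: 7 - Inactive",
]
```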
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 import os
4 import sys
5
6 try:
7 from setuptools import setup
8 except ImportError:
9 from distutils.core import setup
10
11 version = "1.1.0"
12
13 if sys.argv[-1] == 'publish':
14 os.system('python setup.py sdist upload')
15 os.system('python setup.py bdist_wheel upload')
16 sys.exit()
17
18 if sys.argv[-1] == 'tag':
19 os.system("git tag -a %s -m 'version %s'" % (version, version))
20 os.system("git push --tags")
21 sys.exit()
22
23 with open('README.rst') as readme_file:
24 readme = readme_file.read()
25
26 with open('HISTORY.rst') as history_file:
27 history = history_file.read().replace('.. :changelog:', '')
28
29 requirements = [
30 'future>=0.15.2',
31 'binaryornot>=0.2.0',
32 'jinja2>=2.7',
33 'PyYAML>=3.10',
34 'click>=5.0',
35 'whichcraft>=0.1.1'
36 ]
37
38 long_description = readme + '\n\n' + history
39
40 if sys.argv[-1] == 'readme':
41 print(long_description)
42 sys.exit()
43
44
45 setup(
46 name='cookiecutter',
47 version=version,
48 description=('A command-line utility that creates projects from project '
49 'templates, e.g. creating a Python package project from a '
50 'Python package project template.'),
51 long_description=long_description,
52 author='Audrey Roy',
53 author_email='[email protected]',
54 url='https://github.com/audreyr/cookiecutter',
55 packages=[
56 'cookiecutter',
57 ],
58 package_dir={'cookiecutter': 'cookiecutter'},
59 entry_points={
60 'console_scripts': [
61 'cookiecutter = cookiecutter.cli:main',
62 ]
63 },
64 include_package_data=True,
65 install_requires=requirements,
66 license='BSD',
67 zip_safe=False,
68 classifiers=[
69 'Development Status :: 3 - Alpha',
70 'Environment :: Console',
71 'Intended Audience :: Developers',
72 'Natural Language :: English',
73 'License :: OSI Approved :: BSD License',
74 'Programming Language :: Python',
75 'Programming Language :: Python :: 2',
76 'Programming Language :: Python :: 2.7',
77 'Programming Language :: Python :: 3',
78 'Programming Language :: Python :: 3.3',
79 'Programming Language :: Python :: 3.4',
80 'Programming Language :: Python :: 3.5',
81 'Programming Language :: Python :: Implementation :: CPython',
82 'Programming Language :: Python :: Implementation :: PyPy',
83 'Topic :: Software Development',
84 ],
85 keywords=(
86 'cookiecutter, Python, projects, project templates, Jinja2, '
87 'skeleton, scaffolding, project directory, setup.py, package, '
88 'packaging'
89 ),
90 )
91
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -66,7 +66,7 @@
license='BSD',
zip_safe=False,
classifiers=[
- 'Development Status :: 3 - Alpha',
+ 'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Natural Language :: English',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -66,7 +66,7 @@\n license='BSD',\n zip_safe=False,\n classifiers=[\n- 'Development Status :: 3 - Alpha',\n+ 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Natural Language :: English',\n", "issue": "Increase development status to 'beta' or 'stable'.\nI think we can say the project is waaaay beyond alpha. :wink: \n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport os\nimport sys\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nversion = \"1.1.0\"\n\nif sys.argv[-1] == 'publish':\n os.system('python setup.py sdist upload')\n os.system('python setup.py bdist_wheel upload')\n sys.exit()\n\nif sys.argv[-1] == 'tag':\n os.system(\"git tag -a %s -m 'version %s'\" % (version, version))\n os.system(\"git push --tags\")\n sys.exit()\n\nwith open('README.rst') as readme_file:\n readme = readme_file.read()\n\nwith open('HISTORY.rst') as history_file:\n history = history_file.read().replace('.. :changelog:', '')\n\nrequirements = [\n 'future>=0.15.2',\n 'binaryornot>=0.2.0',\n 'jinja2>=2.7',\n 'PyYAML>=3.10',\n 'click>=5.0',\n 'whichcraft>=0.1.1'\n]\n\nlong_description = readme + '\\n\\n' + history\n\nif sys.argv[-1] == 'readme':\n print(long_description)\n sys.exit()\n\n\nsetup(\n name='cookiecutter',\n version=version,\n description=('A command-line utility that creates projects from project '\n 'templates, e.g. creating a Python package project from a '\n 'Python package project template.'),\n long_description=long_description,\n author='Audrey Roy',\n author_email='[email protected]',\n url='https://github.com/audreyr/cookiecutter',\n packages=[\n 'cookiecutter',\n ],\n package_dir={'cookiecutter': 'cookiecutter'},\n entry_points={\n 'console_scripts': [\n 'cookiecutter = cookiecutter.cli:main',\n ]\n },\n include_package_data=True,\n install_requires=requirements,\n license='BSD',\n zip_safe=False,\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Natural Language :: English',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Software Development',\n ],\n keywords=(\n 'cookiecutter, Python, projects, project templates, Jinja2, '\n 'skeleton, scaffolding, project directory, setup.py, package, '\n 'packaging'\n ),\n)\n", "path": "setup.py"}]} | 1,372 | 90 |
gh_patches_debug_36589 | rasdani/github-patches | git_diff | freedomofpress__securedrop-146 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Journalists should be able to bulk download from document server
Right now, journalists can only download one file at a time, even if there are dozens of new submissions in any given session. The New Yorker team asked if we can enable bulk downloading so that journalists can download multiple files at once.
</issue>
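
The mechanics are straightforward with the standard library: bundle the selected documents into a single zip archive and serve that one file. A rough sketch, using placeholder names rather than the project's actual helpers:

```python
import os
import zipfile


def build_bulk_archive(filenames, archive_path):
    """Bundle the selected submissions into one zip for a single download."""
    with zipfile.ZipFile(archive_path, "w") as archive:
        for filename in filenames:
            # Store each file under its basename so the archive has a flat layout.
            archive.write(filename, arcname=os.path.basename(filename))
    return archive_path
```

Flask's `send_file` can then return the resulting archive with an `application/zip` mimetype and `as_attachment=True`, which is roughly the shape the change takes in the diff further down.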
<code>
[start of modules/deaddrop/files/deaddrop/journalist.py]
1 # -*- coding: utf-8 -*-
2 import os
3 from datetime import datetime
4 import uuid
5
6 from flask import Flask, request, render_template, send_file
7 from flask_wtf.csrf import CsrfProtect
8
9 import config, version, crypto, store, background
10
11 app = Flask(__name__, template_folder=config.JOURNALIST_TEMPLATES_DIR)
12 app.secret_key = config.SECRET_KEY
13
14 app.jinja_env.globals['version'] = version.__version__
15
16 def get_docs(sid):
17 """Get docs associated with source id `sid` sorted by submission date"""
18 docs = []
19 for filename in os.listdir(store.path(sid)):
20 os_stat = os.stat(store.path(sid, filename))
21 docs.append(dict(
22 name=filename,
23 date=str(datetime.fromtimestamp(os_stat.st_mtime)),
24 size=os_stat.st_size,
25 ))
26 # sort by date since ordering by filename is meaningless
27 docs.sort(key=lambda x: x['date'])
28 return docs
29
30 @app.after_request
31 def no_cache(response):
32 """Minimize potential traces of site access by telling the browser not to
33 cache anything"""
34 no_cache_headers = {
35 'Cache-Control': 'no-cache, no-store, must-revalidate',
36 'Pragma': 'no-cache',
37 'Expires': '-1',
38 }
39 for header, header_value in no_cache_headers.iteritems():
40 response.headers.add(header, header_value)
41 return response
42
43 @app.route('/')
44 def index():
45 dirs = os.listdir(config.STORE_DIR)
46 cols = []
47 for d in dirs:
48 cols.append(dict(
49 name=d,
50 sid=crypto.displayid(d),
51 date=str(datetime.fromtimestamp(os.stat(store.path(d)).st_mtime)).split('.')[0]
52 ))
53 cols.sort(key=lambda x: x['date'], reverse=True)
54 return render_template('index.html', cols=cols)
55
56 @app.route('/col/<sid>')
57 def col(sid):
58 return render_template("col.html", sid=sid, codename=crypto.displayid(sid),
59 docs=get_docs(sid), haskey=crypto.getkey(sid))
60
61 @app.route('/col/<sid>/<fn>')
62 def doc(sid, fn):
63 if '..' in fn or fn.startswith('/'):
64 abort(404)
65 return send_file(store.path(sid, fn), mimetype="application/pgp-encrypted")
66
67 @app.route('/reply', methods=('POST',))
68 def reply():
69 sid, msg = request.form['sid'], request.form['msg']
70 crypto.encrypt(crypto.getkey(sid), request.form['msg'], output=
71 store.path(sid, 'reply-%s.gpg' % uuid.uuid4()))
72 return render_template('reply.html', sid=sid, codename=crypto.displayid(sid))
73
74 @app.route('/delete', methods=('POST',))
75 def delete():
76 sid = request.form['sid']
77 doc_names_selected = request.form.getlist('doc_names_selected')
78 docs_selected = [doc for doc in get_docs(sid) if doc['name'] in doc_names_selected]
79 confirm_delete = bool(request.form.get('confirm_delete', False))
80 if confirm_delete:
81 for doc in docs_selected:
82 fn = store.path(sid, doc['name'])
83 crypto.secureunlink(fn)
84 return render_template('delete.html', sid=sid, codename=crypto.displayid(sid),
85 docs_selected=docs_selected, confirm_delete=confirm_delete)
86
87 if __name__ == "__main__":
88 # TODO: make sure this gets run by the web server
89 CsrfProtect(app)
90 app.run(debug=True, port=8081)
91
[end of modules/deaddrop/files/deaddrop/journalist.py]
[start of modules/deaddrop/files/deaddrop/store.py]
1 # -*- coding: utf-8 -*-
2 import os
3 import re
4 import config
5
6 VALIDATE_FILENAME = re.compile("^(reply-)?[a-f0-9-]+(_msg|_doc|)\.gpg$").match
7
8 class PathException(Exception):
9 '''An exception raised by `store.verify` when it encounters a bad path. A path
10 can be bad when it is not absolute, not normalized, not within
11 `config.STORE_DIR`, or doesn't match the filename format.
12 '''
13 pass
14
15 def verify(p):
16 '''Assert that the path is absolute, normalized, inside `config.STORE_DIR`, and
17 matches the filename format.
18 '''
19 if not os.path.isabs(config.STORE_DIR):
20 raise PathException("config.STORE_DIR(%s) is not absolute" % (
21 config.STORE_DIR, ))
22
23 # os.path.abspath makes the path absolute and normalizes '/foo/../bar' to
24 # '/bar', etc. We have to check that the path is normalized before checking
25 # that it starts with the `config.STORE_DIR` or else a malicious actor could
26 # append a bunch of '../../..' to access files outside of the store.
27 if not p == os.path.abspath(p):
28 raise PathException("The path is not absolute and/or normalized")
29
30 if os.path.commonprefix([config.STORE_DIR, p]) != config.STORE_DIR:
31 raise PathException("Invalid directory %s" % (p, ))
32
33 filename = os.path.basename(p)
34 ext = os.path.splitext(filename)[-1]
35
36 if os.path.isfile(p):
37 if ext != '.gpg':
38 # if there's an extension, verify it's a GPG
39 raise PathException("Invalid file extension %s" % (ext, ))
40
41 if not VALIDATE_FILENAME(filename):
42 raise PathException("Invalid filename %s" % (filename, ))
43
44 def path(*s):
45 '''Get the normalized, absolute file path, within `config.STORE_DIR`.'''
46 joined = os.path.join(os.path.abspath(config.STORE_DIR), *s)
47 absolute = os.path.abspath(joined)
48 verify(absolute)
49 return absolute
50
51 def log(msg):
52 file(path('NOTES'), 'a').write(msg)
53
[end of modules/deaddrop/files/deaddrop/store.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/modules/deaddrop/files/deaddrop/journalist.py b/modules/deaddrop/files/deaddrop/journalist.py
--- a/modules/deaddrop/files/deaddrop/journalist.py
+++ b/modules/deaddrop/files/deaddrop/journalist.py
@@ -71,11 +71,22 @@
store.path(sid, 'reply-%s.gpg' % uuid.uuid4()))
return render_template('reply.html', sid=sid, codename=crypto.displayid(sid))
[email protected]('/delete', methods=('POST',))
-def delete():
[email protected]('/bulk', methods=('POST',))
+def bulk():
+ action = request.form['action']
+
sid = request.form['sid']
doc_names_selected = request.form.getlist('doc_names_selected')
- docs_selected = [doc for doc in get_docs(sid) if doc['name'] in doc_names_selected]
+ docs_selected = [ doc for doc in get_docs(sid) if doc['name'] in doc_names_selected ]
+
+ if action == 'download':
+ return bulk_download(sid, docs_selected)
+ elif action == 'delete':
+ return bulk_delete(sid, docs_selected)
+ else:
+ abort(422)
+
+def bulk_delete(sid, docs_selected):
confirm_delete = bool(request.form.get('confirm_delete', False))
if confirm_delete:
for doc in docs_selected:
@@ -84,6 +95,13 @@
return render_template('delete.html', sid=sid, codename=crypto.displayid(sid),
docs_selected=docs_selected, confirm_delete=confirm_delete)
+def bulk_download(sid, docs_selected):
+ filenames = [store.path(sid, doc['name']) for doc in docs_selected]
+ zip = store.get_bulk_archive(filenames)
+ return send_file(zip, mimetype="application/zip", attachment_filename=crypto.displayid(sid), as_attachment=True)
+
+
+
if __name__ == "__main__":
# TODO: make sure this gets run by the web server
CsrfProtect(app)
diff --git a/modules/deaddrop/files/deaddrop/store.py b/modules/deaddrop/files/deaddrop/store.py
--- a/modules/deaddrop/files/deaddrop/store.py
+++ b/modules/deaddrop/files/deaddrop/store.py
@@ -2,6 +2,9 @@
import os
import re
import config
+import zipfile
+import crypto
+import uuid
VALIDATE_FILENAME = re.compile("^(reply-)?[a-f0-9-]+(_msg|_doc|)\.gpg$").match
@@ -48,5 +51,14 @@
verify(absolute)
return absolute
+def get_bulk_archive(filenames):
+ zip_file_name = os.path.join(config.TEMP_DIR, str(uuid.uuid4()) + '.zip')
+ with zipfile.ZipFile(zip_file_name, 'w') as zip:
+ for filename in filenames:
+ verify(filename)
+ basename = os.path.basename(filename)
+ zip.write(filename, arcname=basename)
+ return zip_file_name
+
def log(msg):
file(path('NOTES'), 'a').write(msg)
| {"golden_diff": "diff --git a/modules/deaddrop/files/deaddrop/journalist.py b/modules/deaddrop/files/deaddrop/journalist.py\n--- a/modules/deaddrop/files/deaddrop/journalist.py\n+++ b/modules/deaddrop/files/deaddrop/journalist.py\n@@ -71,11 +71,22 @@\n store.path(sid, 'reply-%s.gpg' % uuid.uuid4()))\n return render_template('reply.html', sid=sid, codename=crypto.displayid(sid))\n \[email protected]('/delete', methods=('POST',))\n-def delete():\[email protected]('/bulk', methods=('POST',))\n+def bulk():\n+ action = request.form['action']\n+\n sid = request.form['sid']\n doc_names_selected = request.form.getlist('doc_names_selected')\n- docs_selected = [doc for doc in get_docs(sid) if doc['name'] in doc_names_selected]\n+ docs_selected = [ doc for doc in get_docs(sid) if doc['name'] in doc_names_selected ]\n+\n+ if action == 'download':\n+ return bulk_download(sid, docs_selected)\n+ elif action == 'delete':\n+ return bulk_delete(sid, docs_selected)\n+ else:\n+ abort(422)\n+\n+def bulk_delete(sid, docs_selected):\n confirm_delete = bool(request.form.get('confirm_delete', False))\n if confirm_delete:\n for doc in docs_selected:\n@@ -84,6 +95,13 @@\n return render_template('delete.html', sid=sid, codename=crypto.displayid(sid),\n docs_selected=docs_selected, confirm_delete=confirm_delete)\n \n+def bulk_download(sid, docs_selected):\n+ filenames = [store.path(sid, doc['name']) for doc in docs_selected]\n+ zip = store.get_bulk_archive(filenames)\n+ return send_file(zip, mimetype=\"application/zip\", attachment_filename=crypto.displayid(sid), as_attachment=True)\n+\n+\n+ \n if __name__ == \"__main__\":\n # TODO: make sure this gets run by the web server\n CsrfProtect(app)\ndiff --git a/modules/deaddrop/files/deaddrop/store.py b/modules/deaddrop/files/deaddrop/store.py\n--- a/modules/deaddrop/files/deaddrop/store.py\n+++ b/modules/deaddrop/files/deaddrop/store.py\n@@ -2,6 +2,9 @@\n import os\n import re\n import config\n+import zipfile\n+import crypto\n+import uuid\n \n VALIDATE_FILENAME = re.compile(\"^(reply-)?[a-f0-9-]+(_msg|_doc|)\\.gpg$\").match\n \n@@ -48,5 +51,14 @@\n verify(absolute)\n return absolute\n \n+def get_bulk_archive(filenames):\n+ zip_file_name = os.path.join(config.TEMP_DIR, str(uuid.uuid4()) + '.zip')\n+ with zipfile.ZipFile(zip_file_name, 'w') as zip:\n+ for filename in filenames:\n+ verify(filename)\n+ basename = os.path.basename(filename)\n+ zip.write(filename, arcname=basename)\n+ return zip_file_name\n+\n def log(msg):\n file(path('NOTES'), 'a').write(msg)\n", "issue": "Journalists should be able to bulk download from document server\nRight now, journalists can only download one file at a time, even if there are dozens new submissions in any given session. The New Yorker team asked if we can enable bulk downloading so that journalists can download multiple files at once. 
\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport os\nfrom datetime import datetime\nimport uuid\n\nfrom flask import Flask, request, render_template, send_file\nfrom flask_wtf.csrf import CsrfProtect\n\nimport config, version, crypto, store, background\n\napp = Flask(__name__, template_folder=config.JOURNALIST_TEMPLATES_DIR)\napp.secret_key = config.SECRET_KEY\n\napp.jinja_env.globals['version'] = version.__version__\n\ndef get_docs(sid):\n \"\"\"Get docs associated with source id `sid` sorted by submission date\"\"\"\n docs = []\n for filename in os.listdir(store.path(sid)):\n os_stat = os.stat(store.path(sid, filename))\n docs.append(dict(\n name=filename,\n date=str(datetime.fromtimestamp(os_stat.st_mtime)),\n size=os_stat.st_size,\n ))\n # sort by date since ordering by filename is meaningless\n docs.sort(key=lambda x: x['date'])\n return docs\n\[email protected]_request\ndef no_cache(response):\n \"\"\"Minimize potential traces of site access by telling the browser not to\n cache anything\"\"\"\n no_cache_headers = {\n 'Cache-Control': 'no-cache, no-store, must-revalidate',\n 'Pragma': 'no-cache',\n 'Expires': '-1',\n }\n for header, header_value in no_cache_headers.iteritems():\n response.headers.add(header, header_value)\n return response\n\[email protected]('/')\ndef index():\n dirs = os.listdir(config.STORE_DIR)\n cols = []\n for d in dirs:\n cols.append(dict(\n name=d,\n sid=crypto.displayid(d),\n date=str(datetime.fromtimestamp(os.stat(store.path(d)).st_mtime)).split('.')[0]\n ))\n cols.sort(key=lambda x: x['date'], reverse=True)\n return render_template('index.html', cols=cols)\n\[email protected]('/col/<sid>')\ndef col(sid):\n return render_template(\"col.html\", sid=sid, codename=crypto.displayid(sid),\n docs=get_docs(sid), haskey=crypto.getkey(sid))\n\[email protected]('/col/<sid>/<fn>')\ndef doc(sid, fn):\n if '..' in fn or fn.startswith('/'):\n abort(404)\n return send_file(store.path(sid, fn), mimetype=\"application/pgp-encrypted\")\n\[email protected]('/reply', methods=('POST',))\ndef reply():\n sid, msg = request.form['sid'], request.form['msg']\n crypto.encrypt(crypto.getkey(sid), request.form['msg'], output=\n store.path(sid, 'reply-%s.gpg' % uuid.uuid4()))\n return render_template('reply.html', sid=sid, codename=crypto.displayid(sid))\n\[email protected]('/delete', methods=('POST',))\ndef delete():\n sid = request.form['sid']\n doc_names_selected = request.form.getlist('doc_names_selected')\n docs_selected = [doc for doc in get_docs(sid) if doc['name'] in doc_names_selected]\n confirm_delete = bool(request.form.get('confirm_delete', False))\n if confirm_delete:\n for doc in docs_selected:\n fn = store.path(sid, doc['name'])\n crypto.secureunlink(fn)\n return render_template('delete.html', sid=sid, codename=crypto.displayid(sid),\n docs_selected=docs_selected, confirm_delete=confirm_delete)\n\nif __name__ == \"__main__\":\n # TODO: make sure this gets run by the web server\n CsrfProtect(app)\n app.run(debug=True, port=8081)\n", "path": "modules/deaddrop/files/deaddrop/journalist.py"}, {"content": "# -*- coding: utf-8 -*-\nimport os\nimport re\nimport config\n\nVALIDATE_FILENAME = re.compile(\"^(reply-)?[a-f0-9-]+(_msg|_doc|)\\.gpg$\").match\n\nclass PathException(Exception):\n '''An exception raised by `store.verify` when it encounters a bad path. 
A path\n can be bad when it is not absolute, not normalized, not within\n `config.STORE_DIR`, or doesn't match the filename format.\n '''\n pass\n\ndef verify(p):\n '''Assert that the path is absolute, normalized, inside `config.STORE_DIR`, and\n matches the filename format.\n '''\n if not os.path.isabs(config.STORE_DIR):\n raise PathException(\"config.STORE_DIR(%s) is not absolute\" % (\n config.STORE_DIR, ))\n\n # os.path.abspath makes the path absolute and normalizes '/foo/../bar' to\n # '/bar', etc. We have to check that the path is normalized before checking\n # that it starts with the `config.STORE_DIR` or else a malicious actor could\n # append a bunch of '../../..' to access files outside of the store.\n if not p == os.path.abspath(p):\n raise PathException(\"The path is not absolute and/or normalized\")\n\n if os.path.commonprefix([config.STORE_DIR, p]) != config.STORE_DIR:\n raise PathException(\"Invalid directory %s\" % (p, ))\n\n filename = os.path.basename(p)\n ext = os.path.splitext(filename)[-1]\n\n if os.path.isfile(p):\n if ext != '.gpg':\n # if there's an extension, verify it's a GPG\n raise PathException(\"Invalid file extension %s\" % (ext, ))\n\n if not VALIDATE_FILENAME(filename):\n raise PathException(\"Invalid filename %s\" % (filename, ))\n\ndef path(*s):\n '''Get the normalized, absolute file path, within `config.STORE_DIR`.'''\n joined = os.path.join(os.path.abspath(config.STORE_DIR), *s)\n absolute = os.path.abspath(joined)\n verify(absolute)\n return absolute\n\ndef log(msg):\n file(path('NOTES'), 'a').write(msg)\n", "path": "modules/deaddrop/files/deaddrop/store.py"}]} | 2,142 | 694 |
gh_patches_debug_10867 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-2784 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Traceback when viewing certain gifs
Open the attached flow dump in mitmproxy console. When viewing the content, observe the traceback that flashes to screen. A number of issues here:
- [ ] Kaitai should be able to parse this gif. It's not technically correct, but I've seen this often, especially with single-pixel tracking gifs.
- *Even if Kaitai fails*, we shouldn't crash - the user should see an "invalid image" message (or something more informative). (#2698)
- *Even if we crash*, the traceback shouldn't be flashed to screen and disappear. This is an issue with the way we handle super-long error events in console. (#2699)
[bar.log](https://github.com/mitmproxy/mitmproxy/files/1545955/bar.log)
</issue>
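
The second bullet is mainly about failure handling around the parser. A sketch of that idea follows; the function name is illustrative and this is not mitmproxy's actual contentview API, but it shows the parse wrapped so a malformed image turns into a message rather than an uncaught traceback.

```python
from kaitaistruct import KaitaiStream, BytesIO

from mitmproxy.contrib.kaitaistruct.gif import Gif


def describe_gif(data: bytes) -> str:
    try:
        gif = Gif(KaitaiStream(BytesIO(data)))
    except Exception as e:
        # Malformed or unsupported image: report it instead of crashing the view.
        return "Invalid image: {}".format(e)
    screen = gif.logical_screen_descriptor
    return "GIF, {} x {} pixels".format(screen.screen_width, screen.screen_height)
```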
<code>
[start of mitmproxy/contrib/kaitaistruct/gif.py]
1 # This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
2
3 import array
4 import struct
5 import zlib
6 from enum import Enum
7 from pkg_resources import parse_version
8
9 from kaitaistruct import __version__ as ks_version, KaitaiStruct, KaitaiStream, BytesIO
10
11
12 if parse_version(ks_version) < parse_version('0.7'):
13 raise Exception("Incompatible Kaitai Struct Python API: 0.7 or later is required, but you have %s" % (ks_version))
14
15 class Gif(KaitaiStruct):
16
17 class BlockType(Enum):
18 extension = 33
19 local_image_descriptor = 44
20 end_of_file = 59
21
22 class ExtensionLabel(Enum):
23 graphic_control = 249
24 comment = 254
25 application = 255
26 def __init__(self, _io, _parent=None, _root=None):
27 self._io = _io
28 self._parent = _parent
29 self._root = _root if _root else self
30 self.hdr = self._root.Header(self._io, self, self._root)
31 self.logical_screen_descriptor = self._root.LogicalScreenDescriptorStruct(self._io, self, self._root)
32 if self.logical_screen_descriptor.has_color_table:
33 self._raw_global_color_table = self._io.read_bytes((self.logical_screen_descriptor.color_table_size * 3))
34 io = KaitaiStream(BytesIO(self._raw_global_color_table))
35 self.global_color_table = self._root.ColorTable(io, self, self._root)
36
37 self.blocks = []
38 while not self._io.is_eof():
39 self.blocks.append(self._root.Block(self._io, self, self._root))
40
41
42 class ImageData(KaitaiStruct):
43 def __init__(self, _io, _parent=None, _root=None):
44 self._io = _io
45 self._parent = _parent
46 self._root = _root if _root else self
47 self.lzw_min_code_size = self._io.read_u1()
48 self.subblocks = self._root.Subblocks(self._io, self, self._root)
49
50
51 class ColorTableEntry(KaitaiStruct):
52 def __init__(self, _io, _parent=None, _root=None):
53 self._io = _io
54 self._parent = _parent
55 self._root = _root if _root else self
56 self.red = self._io.read_u1()
57 self.green = self._io.read_u1()
58 self.blue = self._io.read_u1()
59
60
61 class LogicalScreenDescriptorStruct(KaitaiStruct):
62 def __init__(self, _io, _parent=None, _root=None):
63 self._io = _io
64 self._parent = _parent
65 self._root = _root if _root else self
66 self.screen_width = self._io.read_u2le()
67 self.screen_height = self._io.read_u2le()
68 self.flags = self._io.read_u1()
69 self.bg_color_index = self._io.read_u1()
70 self.pixel_aspect_ratio = self._io.read_u1()
71
72 @property
73 def has_color_table(self):
74 if hasattr(self, '_m_has_color_table'):
75 return self._m_has_color_table if hasattr(self, '_m_has_color_table') else None
76
77 self._m_has_color_table = (self.flags & 128) != 0
78 return self._m_has_color_table if hasattr(self, '_m_has_color_table') else None
79
80 @property
81 def color_table_size(self):
82 if hasattr(self, '_m_color_table_size'):
83 return self._m_color_table_size if hasattr(self, '_m_color_table_size') else None
84
85 self._m_color_table_size = (2 << (self.flags & 7))
86 return self._m_color_table_size if hasattr(self, '_m_color_table_size') else None
87
88
89 class LocalImageDescriptor(KaitaiStruct):
90 def __init__(self, _io, _parent=None, _root=None):
91 self._io = _io
92 self._parent = _parent
93 self._root = _root if _root else self
94 self.left = self._io.read_u2le()
95 self.top = self._io.read_u2le()
96 self.width = self._io.read_u2le()
97 self.height = self._io.read_u2le()
98 self.flags = self._io.read_u1()
99 if self.has_color_table:
100 self._raw_local_color_table = self._io.read_bytes((self.color_table_size * 3))
101 io = KaitaiStream(BytesIO(self._raw_local_color_table))
102 self.local_color_table = self._root.ColorTable(io, self, self._root)
103
104 self.image_data = self._root.ImageData(self._io, self, self._root)
105
106 @property
107 def has_color_table(self):
108 if hasattr(self, '_m_has_color_table'):
109 return self._m_has_color_table if hasattr(self, '_m_has_color_table') else None
110
111 self._m_has_color_table = (self.flags & 128) != 0
112 return self._m_has_color_table if hasattr(self, '_m_has_color_table') else None
113
114 @property
115 def has_interlace(self):
116 if hasattr(self, '_m_has_interlace'):
117 return self._m_has_interlace if hasattr(self, '_m_has_interlace') else None
118
119 self._m_has_interlace = (self.flags & 64) != 0
120 return self._m_has_interlace if hasattr(self, '_m_has_interlace') else None
121
122 @property
123 def has_sorted_color_table(self):
124 if hasattr(self, '_m_has_sorted_color_table'):
125 return self._m_has_sorted_color_table if hasattr(self, '_m_has_sorted_color_table') else None
126
127 self._m_has_sorted_color_table = (self.flags & 32) != 0
128 return self._m_has_sorted_color_table if hasattr(self, '_m_has_sorted_color_table') else None
129
130 @property
131 def color_table_size(self):
132 if hasattr(self, '_m_color_table_size'):
133 return self._m_color_table_size if hasattr(self, '_m_color_table_size') else None
134
135 self._m_color_table_size = (2 << (self.flags & 7))
136 return self._m_color_table_size if hasattr(self, '_m_color_table_size') else None
137
138
139 class Block(KaitaiStruct):
140 def __init__(self, _io, _parent=None, _root=None):
141 self._io = _io
142 self._parent = _parent
143 self._root = _root if _root else self
144 self.block_type = self._root.BlockType(self._io.read_u1())
145 _on = self.block_type
146 if _on == self._root.BlockType.extension:
147 self.body = self._root.Extension(self._io, self, self._root)
148 elif _on == self._root.BlockType.local_image_descriptor:
149 self.body = self._root.LocalImageDescriptor(self._io, self, self._root)
150
151
152 class ColorTable(KaitaiStruct):
153 def __init__(self, _io, _parent=None, _root=None):
154 self._io = _io
155 self._parent = _parent
156 self._root = _root if _root else self
157 self.entries = []
158 while not self._io.is_eof():
159 self.entries.append(self._root.ColorTableEntry(self._io, self, self._root))
160
161
162
163 class Header(KaitaiStruct):
164 def __init__(self, _io, _parent=None, _root=None):
165 self._io = _io
166 self._parent = _parent
167 self._root = _root if _root else self
168 self.magic = self._io.ensure_fixed_contents(struct.pack('3b', 71, 73, 70))
169 self.version = (self._io.read_bytes(3)).decode(u"ASCII")
170
171
172 class ExtGraphicControl(KaitaiStruct):
173 def __init__(self, _io, _parent=None, _root=None):
174 self._io = _io
175 self._parent = _parent
176 self._root = _root if _root else self
177 self.block_size = self._io.ensure_fixed_contents(struct.pack('1b', 4))
178 self.flags = self._io.read_u1()
179 self.delay_time = self._io.read_u2le()
180 self.transparent_idx = self._io.read_u1()
181 self.terminator = self._io.ensure_fixed_contents(struct.pack('1b', 0))
182
183 @property
184 def transparent_color_flag(self):
185 if hasattr(self, '_m_transparent_color_flag'):
186 return self._m_transparent_color_flag if hasattr(self, '_m_transparent_color_flag') else None
187
188 self._m_transparent_color_flag = (self.flags & 1) != 0
189 return self._m_transparent_color_flag if hasattr(self, '_m_transparent_color_flag') else None
190
191 @property
192 def user_input_flag(self):
193 if hasattr(self, '_m_user_input_flag'):
194 return self._m_user_input_flag if hasattr(self, '_m_user_input_flag') else None
195
196 self._m_user_input_flag = (self.flags & 2) != 0
197 return self._m_user_input_flag if hasattr(self, '_m_user_input_flag') else None
198
199
200 class Subblock(KaitaiStruct):
201 def __init__(self, _io, _parent=None, _root=None):
202 self._io = _io
203 self._parent = _parent
204 self._root = _root if _root else self
205 self.num_bytes = self._io.read_u1()
206 self.bytes = self._io.read_bytes(self.num_bytes)
207
208
209 class ExtApplication(KaitaiStruct):
210 def __init__(self, _io, _parent=None, _root=None):
211 self._io = _io
212 self._parent = _parent
213 self._root = _root if _root else self
214 self.application_id = self._root.Subblock(self._io, self, self._root)
215 self.subblocks = []
216 while True:
217 _ = self._root.Subblock(self._io, self, self._root)
218 self.subblocks.append(_)
219 if _.num_bytes == 0:
220 break
221
222
223 class Subblocks(KaitaiStruct):
224 def __init__(self, _io, _parent=None, _root=None):
225 self._io = _io
226 self._parent = _parent
227 self._root = _root if _root else self
228 self.entries = []
229 while True:
230 _ = self._root.Subblock(self._io, self, self._root)
231 self.entries.append(_)
232 if _.num_bytes == 0:
233 break
234
235
236 class Extension(KaitaiStruct):
237 def __init__(self, _io, _parent=None, _root=None):
238 self._io = _io
239 self._parent = _parent
240 self._root = _root if _root else self
241 self.label = self._root.ExtensionLabel(self._io.read_u1())
242 _on = self.label
243 if _on == self._root.ExtensionLabel.application:
244 self.body = self._root.ExtApplication(self._io, self, self._root)
245 elif _on == self._root.ExtensionLabel.comment:
246 self.body = self._root.Subblocks(self._io, self, self._root)
247 elif _on == self._root.ExtensionLabel.graphic_control:
248 self.body = self._root.ExtGraphicControl(self._io, self, self._root)
249 else:
250 self.body = self._root.Subblocks(self._io, self, self._root)
251
252
253
254
[end of mitmproxy/contrib/kaitaistruct/gif.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mitmproxy/contrib/kaitaistruct/gif.py b/mitmproxy/contrib/kaitaistruct/gif.py
--- a/mitmproxy/contrib/kaitaistruct/gif.py
+++ b/mitmproxy/contrib/kaitaistruct/gif.py
@@ -35,9 +35,11 @@
self.global_color_table = self._root.ColorTable(io, self, self._root)
self.blocks = []
- while not self._io.is_eof():
- self.blocks.append(self._root.Block(self._io, self, self._root))
-
+ while True:
+ _ = self._root.Block(self._io, self, self._root)
+ self.blocks.append(_)
+ if ((self._io.is_eof()) or (_.block_type == self._root.BlockType.end_of_file)) :
+ break
class ImageData(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
| {"golden_diff": "diff --git a/mitmproxy/contrib/kaitaistruct/gif.py b/mitmproxy/contrib/kaitaistruct/gif.py\n--- a/mitmproxy/contrib/kaitaistruct/gif.py\n+++ b/mitmproxy/contrib/kaitaistruct/gif.py\n@@ -35,9 +35,11 @@\n self.global_color_table = self._root.ColorTable(io, self, self._root)\n \n self.blocks = []\n- while not self._io.is_eof():\n- self.blocks.append(self._root.Block(self._io, self, self._root))\n-\n+ while True:\n+ _ = self._root.Block(self._io, self, self._root)\n+ self.blocks.append(_)\n+ if ((self._io.is_eof()) or (_.block_type == self._root.BlockType.end_of_file)) :\n+ break\n \n class ImageData(KaitaiStruct):\n def __init__(self, _io, _parent=None, _root=None):\n", "issue": "Traceback when viewing certain gifs \nOpen the attached flow dump in mitmproxy console. When viewing the content, observe the traceback that flashes to screen. A number of issues here:\r\n\r\n- [ ] Kaitai should be able to parse this gif. It's not technically correct, but I've seen this often, especially with single-pixel tracking gifs. \r\n- *Even if Kaitai fails*, we shouldn't crash - the user should see an \"invalid image\" message (or something more informative). (#2698)\r\n- *Even if we crash*, the traceback shouldn't be flashed to screen and disappear. This is an issue with the way we handle super-long error events in console. (#2699)\r\n\r\n[bar.log](https://github.com/mitmproxy/mitmproxy/files/1545955/bar.log)\r\n\n", "before_files": [{"content": "# This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild\n\nimport array\nimport struct\nimport zlib\nfrom enum import Enum\nfrom pkg_resources import parse_version\n\nfrom kaitaistruct import __version__ as ks_version, KaitaiStruct, KaitaiStream, BytesIO\n\n\nif parse_version(ks_version) < parse_version('0.7'):\n raise Exception(\"Incompatible Kaitai Struct Python API: 0.7 or later is required, but you have %s\" % (ks_version))\n\nclass Gif(KaitaiStruct):\n\n class BlockType(Enum):\n extension = 33\n local_image_descriptor = 44\n end_of_file = 59\n\n class ExtensionLabel(Enum):\n graphic_control = 249\n comment = 254\n application = 255\n def __init__(self, _io, _parent=None, _root=None):\n self._io = _io\n self._parent = _parent\n self._root = _root if _root else self\n self.hdr = self._root.Header(self._io, self, self._root)\n self.logical_screen_descriptor = self._root.LogicalScreenDescriptorStruct(self._io, self, self._root)\n if self.logical_screen_descriptor.has_color_table:\n self._raw_global_color_table = self._io.read_bytes((self.logical_screen_descriptor.color_table_size * 3))\n io = KaitaiStream(BytesIO(self._raw_global_color_table))\n self.global_color_table = self._root.ColorTable(io, self, self._root)\n\n self.blocks = []\n while not self._io.is_eof():\n self.blocks.append(self._root.Block(self._io, self, self._root))\n\n\n class ImageData(KaitaiStruct):\n def __init__(self, _io, _parent=None, _root=None):\n self._io = _io\n self._parent = _parent\n self._root = _root if _root else self\n self.lzw_min_code_size = self._io.read_u1()\n self.subblocks = self._root.Subblocks(self._io, self, self._root)\n\n\n class ColorTableEntry(KaitaiStruct):\n def __init__(self, _io, _parent=None, _root=None):\n self._io = _io\n self._parent = _parent\n self._root = _root if _root else self\n self.red = self._io.read_u1()\n self.green = self._io.read_u1()\n self.blue = self._io.read_u1()\n\n\n class LogicalScreenDescriptorStruct(KaitaiStruct):\n def __init__(self, _io, _parent=None, _root=None):\n self._io = 
_io\n self._parent = _parent\n self._root = _root if _root else self\n self.screen_width = self._io.read_u2le()\n self.screen_height = self._io.read_u2le()\n self.flags = self._io.read_u1()\n self.bg_color_index = self._io.read_u1()\n self.pixel_aspect_ratio = self._io.read_u1()\n\n @property\n def has_color_table(self):\n if hasattr(self, '_m_has_color_table'):\n return self._m_has_color_table if hasattr(self, '_m_has_color_table') else None\n\n self._m_has_color_table = (self.flags & 128) != 0\n return self._m_has_color_table if hasattr(self, '_m_has_color_table') else None\n\n @property\n def color_table_size(self):\n if hasattr(self, '_m_color_table_size'):\n return self._m_color_table_size if hasattr(self, '_m_color_table_size') else None\n\n self._m_color_table_size = (2 << (self.flags & 7))\n return self._m_color_table_size if hasattr(self, '_m_color_table_size') else None\n\n\n class LocalImageDescriptor(KaitaiStruct):\n def __init__(self, _io, _parent=None, _root=None):\n self._io = _io\n self._parent = _parent\n self._root = _root if _root else self\n self.left = self._io.read_u2le()\n self.top = self._io.read_u2le()\n self.width = self._io.read_u2le()\n self.height = self._io.read_u2le()\n self.flags = self._io.read_u1()\n if self.has_color_table:\n self._raw_local_color_table = self._io.read_bytes((self.color_table_size * 3))\n io = KaitaiStream(BytesIO(self._raw_local_color_table))\n self.local_color_table = self._root.ColorTable(io, self, self._root)\n\n self.image_data = self._root.ImageData(self._io, self, self._root)\n\n @property\n def has_color_table(self):\n if hasattr(self, '_m_has_color_table'):\n return self._m_has_color_table if hasattr(self, '_m_has_color_table') else None\n\n self._m_has_color_table = (self.flags & 128) != 0\n return self._m_has_color_table if hasattr(self, '_m_has_color_table') else None\n\n @property\n def has_interlace(self):\n if hasattr(self, '_m_has_interlace'):\n return self._m_has_interlace if hasattr(self, '_m_has_interlace') else None\n\n self._m_has_interlace = (self.flags & 64) != 0\n return self._m_has_interlace if hasattr(self, '_m_has_interlace') else None\n\n @property\n def has_sorted_color_table(self):\n if hasattr(self, '_m_has_sorted_color_table'):\n return self._m_has_sorted_color_table if hasattr(self, '_m_has_sorted_color_table') else None\n\n self._m_has_sorted_color_table = (self.flags & 32) != 0\n return self._m_has_sorted_color_table if hasattr(self, '_m_has_sorted_color_table') else None\n\n @property\n def color_table_size(self):\n if hasattr(self, '_m_color_table_size'):\n return self._m_color_table_size if hasattr(self, '_m_color_table_size') else None\n\n self._m_color_table_size = (2 << (self.flags & 7))\n return self._m_color_table_size if hasattr(self, '_m_color_table_size') else None\n\n\n class Block(KaitaiStruct):\n def __init__(self, _io, _parent=None, _root=None):\n self._io = _io\n self._parent = _parent\n self._root = _root if _root else self\n self.block_type = self._root.BlockType(self._io.read_u1())\n _on = self.block_type\n if _on == self._root.BlockType.extension:\n self.body = self._root.Extension(self._io, self, self._root)\n elif _on == self._root.BlockType.local_image_descriptor:\n self.body = self._root.LocalImageDescriptor(self._io, self, self._root)\n\n\n class ColorTable(KaitaiStruct):\n def __init__(self, _io, _parent=None, _root=None):\n self._io = _io\n self._parent = _parent\n self._root = _root if _root else self\n self.entries = []\n while not self._io.is_eof():\n 
self.entries.append(self._root.ColorTableEntry(self._io, self, self._root))\n\n\n\n class Header(KaitaiStruct):\n def __init__(self, _io, _parent=None, _root=None):\n self._io = _io\n self._parent = _parent\n self._root = _root if _root else self\n self.magic = self._io.ensure_fixed_contents(struct.pack('3b', 71, 73, 70))\n self.version = (self._io.read_bytes(3)).decode(u\"ASCII\")\n\n\n class ExtGraphicControl(KaitaiStruct):\n def __init__(self, _io, _parent=None, _root=None):\n self._io = _io\n self._parent = _parent\n self._root = _root if _root else self\n self.block_size = self._io.ensure_fixed_contents(struct.pack('1b', 4))\n self.flags = self._io.read_u1()\n self.delay_time = self._io.read_u2le()\n self.transparent_idx = self._io.read_u1()\n self.terminator = self._io.ensure_fixed_contents(struct.pack('1b', 0))\n\n @property\n def transparent_color_flag(self):\n if hasattr(self, '_m_transparent_color_flag'):\n return self._m_transparent_color_flag if hasattr(self, '_m_transparent_color_flag') else None\n\n self._m_transparent_color_flag = (self.flags & 1) != 0\n return self._m_transparent_color_flag if hasattr(self, '_m_transparent_color_flag') else None\n\n @property\n def user_input_flag(self):\n if hasattr(self, '_m_user_input_flag'):\n return self._m_user_input_flag if hasattr(self, '_m_user_input_flag') else None\n\n self._m_user_input_flag = (self.flags & 2) != 0\n return self._m_user_input_flag if hasattr(self, '_m_user_input_flag') else None\n\n\n class Subblock(KaitaiStruct):\n def __init__(self, _io, _parent=None, _root=None):\n self._io = _io\n self._parent = _parent\n self._root = _root if _root else self\n self.num_bytes = self._io.read_u1()\n self.bytes = self._io.read_bytes(self.num_bytes)\n\n\n class ExtApplication(KaitaiStruct):\n def __init__(self, _io, _parent=None, _root=None):\n self._io = _io\n self._parent = _parent\n self._root = _root if _root else self\n self.application_id = self._root.Subblock(self._io, self, self._root)\n self.subblocks = []\n while True:\n _ = self._root.Subblock(self._io, self, self._root)\n self.subblocks.append(_)\n if _.num_bytes == 0:\n break\n\n\n class Subblocks(KaitaiStruct):\n def __init__(self, _io, _parent=None, _root=None):\n self._io = _io\n self._parent = _parent\n self._root = _root if _root else self\n self.entries = []\n while True:\n _ = self._root.Subblock(self._io, self, self._root)\n self.entries.append(_)\n if _.num_bytes == 0:\n break\n\n\n class Extension(KaitaiStruct):\n def __init__(self, _io, _parent=None, _root=None):\n self._io = _io\n self._parent = _parent\n self._root = _root if _root else self\n self.label = self._root.ExtensionLabel(self._io.read_u1())\n _on = self.label\n if _on == self._root.ExtensionLabel.application:\n self.body = self._root.ExtApplication(self._io, self, self._root)\n elif _on == self._root.ExtensionLabel.comment:\n self.body = self._root.Subblocks(self._io, self, self._root)\n elif _on == self._root.ExtensionLabel.graphic_control:\n self.body = self._root.ExtGraphicControl(self._io, self, self._root)\n else:\n self.body = self._root.Subblocks(self._io, self, self._root)\n\n\n\n", "path": "mitmproxy/contrib/kaitaistruct/gif.py"}]} | 3,970 | 215 |
gh_patches_debug_16695 | rasdani/github-patches | git_diff | elastic__apm-agent-python-1790 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[META 759] Add S3 Bucket and Object Key to S3 instrumentation
See meta issue and spec for the description and details:
- Meta issue: https://github.com/elastic/apm/issues/759
- Spec issue: https://github.com/elastic/apm/issues/760
</issue>
<code>
[start of elasticapm/instrumentation/packages/botocore.py]
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2019, Elasticsearch BV
4 # All rights reserved.
5 #
6 # Redistribution and use in source and binary forms, with or without
7 # modification, are permitted provided that the following conditions are met:
8 #
9 # * Redistributions of source code must retain the above copyright notice, this
10 # list of conditions and the following disclaimer.
11 #
12 # * Redistributions in binary form must reproduce the above copyright notice,
13 # this list of conditions and the following disclaimer in the documentation
14 # and/or other materials provided with the distribution.
15 #
16 # * Neither the name of the copyright holder nor the names of its
17 # contributors may be used to endorse or promote products derived from
18 # this software without specific prior written permission.
19 #
20 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31 import urllib.parse
32 from collections import namedtuple
33
34 from elasticapm.conf import constants
35 from elasticapm.instrumentation.packages.base import AbstractInstrumentedModule
36 from elasticapm.traces import SpanType, capture_span, execution_context
37 from elasticapm.utils.disttracing import TraceParent
38 from elasticapm.utils.logging import get_logger
39
40 logger = get_logger("elasticapm.instrument")
41
42 SQS_MAX_ATTRIBUTES = 10
43
44
45 HandlerInfo = namedtuple("HandlerInfo", ("signature", "span_type", "span_subtype", "span_action", "context"))
46
47 # Used for boto3 < 1.7
48 endpoint_to_service_id = {"SNS": "SNS", "S3": "S3", "DYNAMODB": "DynamoDB", "SQS": "SQS"}
49
50
51 class BotocoreInstrumentation(AbstractInstrumentedModule):
52 name = "botocore"
53
54 instrument_list = [("botocore.client", "BaseClient._make_api_call")]
55
56 capture_span_ctx = capture_span
57
58 def _call(self, service, instance, args, kwargs):
59 """
60 This is split out from `call()` so that it can be re-used by the
61 aiobotocore instrumentation without duplicating all of this code.
62 """
63 operation_name = kwargs.get("operation_name", args[0])
64
65 parsed_url = urllib.parse.urlparse(instance.meta.endpoint_url)
66 context = {
67 "destination": {
68 "address": parsed_url.hostname,
69 "port": parsed_url.port,
70 "cloud": {"region": instance.meta.region_name},
71 }
72 }
73
74 handler_info = None
75 handler = handlers.get(service, False)
76 if handler:
77 handler_info = handler(operation_name, service, instance, args, kwargs, context)
78 if not handler_info:
79 handler_info = handle_default(operation_name, service, instance, args, kwargs, context)
80
81 return self.capture_span_ctx(
82 handler_info.signature,
83 span_type=handler_info.span_type,
84 leaf=True,
85 span_subtype=handler_info.span_subtype,
86 span_action=handler_info.span_action,
87 extra=handler_info.context,
88 )
89
90 def _get_service(self, instance):
91 service_model = instance.meta.service_model
92 if hasattr(service_model, "service_id"): # added in boto3 1.7
93 service = service_model.service_id
94 else:
95 service = service_model.service_name.upper()
96 service = endpoint_to_service_id.get(service, service)
97 return service
98
99 def call(self, module, method, wrapped, instance, args, kwargs):
100 service = self._get_service(instance)
101
102 ctx = self._call(service, instance, args, kwargs)
103 with ctx as span:
104 if service in pre_span_modifiers:
105 pre_span_modifiers[service](span, args, kwargs)
106 result = wrapped(*args, **kwargs)
107 if service in post_span_modifiers:
108 post_span_modifiers[service](span, args, kwargs, result)
109 request_id = result.get("ResponseMetadata", {}).get("RequestId")
110 if request_id:
111 span.update_context("http", {"request": {"id": request_id}})
112 return result
113
114
115 def handle_s3(operation_name, service, instance, args, kwargs, context):
116 span_type = "storage"
117 span_subtype = "s3"
118 span_action = operation_name
119 if len(args) > 1 and "Bucket" in args[1]:
120 bucket = args[1]["Bucket"]
121 else:
122 # TODO handle Access Points
123 bucket = ""
124 signature = f"S3 {operation_name} {bucket}"
125
126 context["destination"]["service"] = {"name": span_subtype, "resource": f"s3/{bucket}", "type": span_type}
127
128 return HandlerInfo(signature, span_type, span_subtype, span_action, context)
129
130
131 def handle_dynamodb(operation_name, service, instance, args, kwargs, context):
132 span_type = "db"
133 span_subtype = "dynamodb"
134 span_action = "query"
135 if len(args) > 1 and "TableName" in args[1]:
136 table = args[1]["TableName"]
137 else:
138 table = ""
139 signature = f"DynamoDB {operation_name} {table}".rstrip()
140
141 context["db"] = {"type": "dynamodb", "instance": instance.meta.region_name}
142 if operation_name == "Query" and len(args) > 1 and "KeyConditionExpression" in args[1]:
143 context["db"]["statement"] = args[1]["KeyConditionExpression"]
144
145 context["destination"]["service"] = {"name": span_subtype, "resource": table, "type": span_type}
146 return HandlerInfo(signature, span_type, span_subtype, span_action, context)
147
148
149 def handle_sns(operation_name, service, instance, args, kwargs, context):
150 if operation_name != "Publish":
151 # only "publish" is handled specifically, other endpoints get the default treatment
152 return False
153 span_type = "messaging"
154 span_subtype = "sns"
155 span_action = "send"
156 topic_name = ""
157 if len(args) > 1:
158 if "Name" in args[1]:
159 topic_name = args[1]["Name"]
160 if "TopicArn" in args[1]:
161 topic_name = args[1]["TopicArn"].rsplit(":", maxsplit=1)[-1]
162 signature = f"SNS {operation_name} {topic_name}".rstrip()
163 context["destination"]["service"] = {
164 "name": span_subtype,
165 "resource": f"{span_subtype}/{topic_name}" if topic_name else span_subtype,
166 "type": span_type,
167 }
168 return HandlerInfo(signature, span_type, span_subtype, span_action, context)
169
170
171 SQS_OPERATIONS = {
172 "SendMessage": {"span_action": "send", "signature": "SEND to"},
173 "SendMessageBatch": {"span_action": "send_batch", "signature": "SEND_BATCH to"},
174 "ReceiveMessage": {"span_action": "receive", "signature": "RECEIVE from"},
175 "DeleteMessage": {"span_action": "delete", "signature": "DELETE from"},
176 "DeleteMessageBatch": {"span_action": "delete_batch", "signature": "DELETE_BATCH from"},
177 }
178
179
180 def handle_sqs(operation_name, service, instance, args, kwargs, context):
181 op = SQS_OPERATIONS.get(operation_name, None)
182 if not op:
183 # only "publish" is handled specifically, other endpoints get the default treatment
184 return False
185 span_type = "messaging"
186 span_subtype = "sqs"
187 topic_name = ""
188
189 if len(args) > 1:
190 topic_name = args[1]["QueueUrl"].rsplit("/", maxsplit=1)[-1]
191 signature = f"SQS {op['signature']} {topic_name}".rstrip() if topic_name else f"SQS {op['signature']}"
192 context["destination"]["service"] = {
193 "name": span_subtype,
194 "resource": f"{span_subtype}/{topic_name}" if topic_name else span_subtype,
195 "type": span_type,
196 }
197 return HandlerInfo(signature, span_type, span_subtype, op["span_action"], context)
198
199
200 def modify_span_sqs_pre(span, args, kwargs):
201 operation_name = kwargs.get("operation_name", args[0])
202 if span.id:
203 trace_parent = span.transaction.trace_parent.copy_from(span_id=span.id)
204 else:
205 # this is a dropped span, use transaction id instead
206 transaction = execution_context.get_transaction()
207 trace_parent = transaction.trace_parent.copy_from(span_id=transaction.id)
208 attributes = {constants.TRACEPARENT_HEADER_NAME: {"DataType": "String", "StringValue": trace_parent.to_string()}}
209 if trace_parent.tracestate:
210 attributes[constants.TRACESTATE_HEADER_NAME] = {"DataType": "String", "StringValue": trace_parent.tracestate}
211 if len(args) > 1:
212 if operation_name in ("SendMessage", "SendMessageBatch"):
213 attributes_count = len(attributes)
214 if operation_name == "SendMessage":
215 messages = [args[1]]
216 else:
217 messages = args[1]["Entries"]
218 for message in messages:
219 message["MessageAttributes"] = message.get("MessageAttributes") or {}
220 if len(message["MessageAttributes"]) + attributes_count <= SQS_MAX_ATTRIBUTES:
221 message["MessageAttributes"].update(attributes)
222 else:
223 logger.info("Not adding disttracing headers to message due to attribute limit reached")
224 elif operation_name == "ReceiveMessage":
225 message_attributes = args[1].setdefault("MessageAttributeNames", [])
226 if "All" not in message_attributes:
227 message_attributes.extend([constants.TRACEPARENT_HEADER_NAME, constants.TRACESTATE_HEADER_NAME])
228
229
230 def modify_span_sqs_post(span: SpanType, args, kwargs, result):
231 operation_name = kwargs.get("operation_name", args[0])
232 if operation_name == "ReceiveMessage" and "Messages" in result:
233 for message in result["Messages"][:1000]: # only up to 1000 span links are recorded
234 if "MessageAttributes" in message and constants.TRACEPARENT_HEADER_NAME in message["MessageAttributes"]:
235 tp = TraceParent.from_string(
236 message["MessageAttributes"][constants.TRACEPARENT_HEADER_NAME]["StringValue"]
237 )
238 span.add_link(tp)
239
240
241 def handle_default(operation_name, service, instance, args, kwargs, context):
242 span_type = "aws"
243 span_subtype = service.lower()
244 span_action = operation_name
245
246 context["destination"]["service"] = {"name": span_subtype, "resource": span_subtype, "type": span_type}
247
248 signature = f"{service}:{operation_name}"
249 return HandlerInfo(signature, span_type, span_subtype, span_action, context)
250
251
252 handlers = {
253 "S3": handle_s3,
254 "DynamoDB": handle_dynamodb,
255 "SNS": handle_sns,
256 "SQS": handle_sqs,
257 "default": handle_default,
258 }
259
260 pre_span_modifiers = {
261 "SQS": modify_span_sqs_pre,
262 }
263
264 post_span_modifiers = {
265 "SQS": modify_span_sqs_post,
266 }
267
[end of elasticapm/instrumentation/packages/botocore.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/elasticapm/instrumentation/packages/botocore.py b/elasticapm/instrumentation/packages/botocore.py
--- a/elasticapm/instrumentation/packages/botocore.py
+++ b/elasticapm/instrumentation/packages/botocore.py
@@ -116,11 +116,19 @@
span_type = "storage"
span_subtype = "s3"
span_action = operation_name
- if len(args) > 1 and "Bucket" in args[1]:
- bucket = args[1]["Bucket"]
+ if len(args) > 1:
+ bucket = args[1].get("Bucket", "")
+ key = args[1].get("Key", "")
else:
# TODO handle Access Points
bucket = ""
+ key = ""
+ if bucket or key:
+ context["otel_attributes"] = {}
+ if bucket:
+ context["otel_attributes"]["aws.s3.bucket"] = bucket
+ if key:
+ context["otel_attributes"]["aws.s3.key"] = key
signature = f"S3 {operation_name} {bucket}"
context["destination"]["service"] = {"name": span_subtype, "resource": f"s3/{bucket}", "type": span_type}
| {"golden_diff": "diff --git a/elasticapm/instrumentation/packages/botocore.py b/elasticapm/instrumentation/packages/botocore.py\n--- a/elasticapm/instrumentation/packages/botocore.py\n+++ b/elasticapm/instrumentation/packages/botocore.py\n@@ -116,11 +116,19 @@\n span_type = \"storage\"\n span_subtype = \"s3\"\n span_action = operation_name\n- if len(args) > 1 and \"Bucket\" in args[1]:\n- bucket = args[1][\"Bucket\"]\n+ if len(args) > 1:\n+ bucket = args[1].get(\"Bucket\", \"\")\n+ key = args[1].get(\"Key\", \"\")\n else:\n # TODO handle Access Points\n bucket = \"\"\n+ key = \"\"\n+ if bucket or key:\n+ context[\"otel_attributes\"] = {}\n+ if bucket:\n+ context[\"otel_attributes\"][\"aws.s3.bucket\"] = bucket\n+ if key:\n+ context[\"otel_attributes\"][\"aws.s3.key\"] = key\n signature = f\"S3 {operation_name} {bucket}\"\n \n context[\"destination\"][\"service\"] = {\"name\": span_subtype, \"resource\": f\"s3/{bucket}\", \"type\": span_type}\n", "issue": "[META 759] Add S3 Bucket and Object Key to S3 instrumentation\nSee meta issue and spec for the description and details:\r\n- Meta issue: https://github.com/elastic/apm/issues/759\r\n- Spec issue: https://github.com/elastic/apm/issues/760\r\n\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport urllib.parse\nfrom collections import namedtuple\n\nfrom elasticapm.conf import constants\nfrom elasticapm.instrumentation.packages.base import AbstractInstrumentedModule\nfrom elasticapm.traces import SpanType, capture_span, execution_context\nfrom elasticapm.utils.disttracing import TraceParent\nfrom elasticapm.utils.logging import get_logger\n\nlogger = get_logger(\"elasticapm.instrument\")\n\nSQS_MAX_ATTRIBUTES = 10\n\n\nHandlerInfo = namedtuple(\"HandlerInfo\", (\"signature\", \"span_type\", \"span_subtype\", \"span_action\", \"context\"))\n\n# Used for boto3 < 1.7\nendpoint_to_service_id = {\"SNS\": \"SNS\", \"S3\": \"S3\", \"DYNAMODB\": \"DynamoDB\", \"SQS\": \"SQS\"}\n\n\nclass BotocoreInstrumentation(AbstractInstrumentedModule):\n name = \"botocore\"\n\n instrument_list = [(\"botocore.client\", \"BaseClient._make_api_call\")]\n\n capture_span_ctx = capture_span\n\n def _call(self, service, instance, args, kwargs):\n \"\"\"\n This is split out from `call()` so that it can be re-used by the\n aiobotocore instrumentation without duplicating all of this code.\n \"\"\"\n operation_name = kwargs.get(\"operation_name\", args[0])\n\n parsed_url = urllib.parse.urlparse(instance.meta.endpoint_url)\n context = {\n \"destination\": {\n \"address\": parsed_url.hostname,\n \"port\": parsed_url.port,\n \"cloud\": {\"region\": instance.meta.region_name},\n }\n }\n\n handler_info = None\n handler = handlers.get(service, False)\n if handler:\n handler_info = handler(operation_name, service, instance, args, kwargs, context)\n if not handler_info:\n handler_info = handle_default(operation_name, service, instance, args, kwargs, context)\n\n return self.capture_span_ctx(\n handler_info.signature,\n span_type=handler_info.span_type,\n leaf=True,\n span_subtype=handler_info.span_subtype,\n span_action=handler_info.span_action,\n extra=handler_info.context,\n )\n\n def _get_service(self, instance):\n service_model = instance.meta.service_model\n if hasattr(service_model, \"service_id\"): # added in boto3 1.7\n service = service_model.service_id\n else:\n service = service_model.service_name.upper()\n service = endpoint_to_service_id.get(service, service)\n return service\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n service = self._get_service(instance)\n\n ctx = self._call(service, instance, args, kwargs)\n with ctx as span:\n if service in pre_span_modifiers:\n pre_span_modifiers[service](span, args, kwargs)\n result = wrapped(*args, **kwargs)\n if service in post_span_modifiers:\n post_span_modifiers[service](span, args, kwargs, result)\n request_id = result.get(\"ResponseMetadata\", {}).get(\"RequestId\")\n if request_id:\n span.update_context(\"http\", {\"request\": {\"id\": request_id}})\n return result\n\n\ndef handle_s3(operation_name, service, instance, args, kwargs, context):\n span_type = \"storage\"\n span_subtype = \"s3\"\n span_action = operation_name\n if len(args) > 1 and \"Bucket\" in args[1]:\n bucket = args[1][\"Bucket\"]\n 
else:\n # TODO handle Access Points\n bucket = \"\"\n signature = f\"S3 {operation_name} {bucket}\"\n\n context[\"destination\"][\"service\"] = {\"name\": span_subtype, \"resource\": f\"s3/{bucket}\", \"type\": span_type}\n\n return HandlerInfo(signature, span_type, span_subtype, span_action, context)\n\n\ndef handle_dynamodb(operation_name, service, instance, args, kwargs, context):\n span_type = \"db\"\n span_subtype = \"dynamodb\"\n span_action = \"query\"\n if len(args) > 1 and \"TableName\" in args[1]:\n table = args[1][\"TableName\"]\n else:\n table = \"\"\n signature = f\"DynamoDB {operation_name} {table}\".rstrip()\n\n context[\"db\"] = {\"type\": \"dynamodb\", \"instance\": instance.meta.region_name}\n if operation_name == \"Query\" and len(args) > 1 and \"KeyConditionExpression\" in args[1]:\n context[\"db\"][\"statement\"] = args[1][\"KeyConditionExpression\"]\n\n context[\"destination\"][\"service\"] = {\"name\": span_subtype, \"resource\": table, \"type\": span_type}\n return HandlerInfo(signature, span_type, span_subtype, span_action, context)\n\n\ndef handle_sns(operation_name, service, instance, args, kwargs, context):\n if operation_name != \"Publish\":\n # only \"publish\" is handled specifically, other endpoints get the default treatment\n return False\n span_type = \"messaging\"\n span_subtype = \"sns\"\n span_action = \"send\"\n topic_name = \"\"\n if len(args) > 1:\n if \"Name\" in args[1]:\n topic_name = args[1][\"Name\"]\n if \"TopicArn\" in args[1]:\n topic_name = args[1][\"TopicArn\"].rsplit(\":\", maxsplit=1)[-1]\n signature = f\"SNS {operation_name} {topic_name}\".rstrip()\n context[\"destination\"][\"service\"] = {\n \"name\": span_subtype,\n \"resource\": f\"{span_subtype}/{topic_name}\" if topic_name else span_subtype,\n \"type\": span_type,\n }\n return HandlerInfo(signature, span_type, span_subtype, span_action, context)\n\n\nSQS_OPERATIONS = {\n \"SendMessage\": {\"span_action\": \"send\", \"signature\": \"SEND to\"},\n \"SendMessageBatch\": {\"span_action\": \"send_batch\", \"signature\": \"SEND_BATCH to\"},\n \"ReceiveMessage\": {\"span_action\": \"receive\", \"signature\": \"RECEIVE from\"},\n \"DeleteMessage\": {\"span_action\": \"delete\", \"signature\": \"DELETE from\"},\n \"DeleteMessageBatch\": {\"span_action\": \"delete_batch\", \"signature\": \"DELETE_BATCH from\"},\n}\n\n\ndef handle_sqs(operation_name, service, instance, args, kwargs, context):\n op = SQS_OPERATIONS.get(operation_name, None)\n if not op:\n # only \"publish\" is handled specifically, other endpoints get the default treatment\n return False\n span_type = \"messaging\"\n span_subtype = \"sqs\"\n topic_name = \"\"\n\n if len(args) > 1:\n topic_name = args[1][\"QueueUrl\"].rsplit(\"/\", maxsplit=1)[-1]\n signature = f\"SQS {op['signature']} {topic_name}\".rstrip() if topic_name else f\"SQS {op['signature']}\"\n context[\"destination\"][\"service\"] = {\n \"name\": span_subtype,\n \"resource\": f\"{span_subtype}/{topic_name}\" if topic_name else span_subtype,\n \"type\": span_type,\n }\n return HandlerInfo(signature, span_type, span_subtype, op[\"span_action\"], context)\n\n\ndef modify_span_sqs_pre(span, args, kwargs):\n operation_name = kwargs.get(\"operation_name\", args[0])\n if span.id:\n trace_parent = span.transaction.trace_parent.copy_from(span_id=span.id)\n else:\n # this is a dropped span, use transaction id instead\n transaction = execution_context.get_transaction()\n trace_parent = transaction.trace_parent.copy_from(span_id=transaction.id)\n attributes = 
{constants.TRACEPARENT_HEADER_NAME: {\"DataType\": \"String\", \"StringValue\": trace_parent.to_string()}}\n if trace_parent.tracestate:\n attributes[constants.TRACESTATE_HEADER_NAME] = {\"DataType\": \"String\", \"StringValue\": trace_parent.tracestate}\n if len(args) > 1:\n if operation_name in (\"SendMessage\", \"SendMessageBatch\"):\n attributes_count = len(attributes)\n if operation_name == \"SendMessage\":\n messages = [args[1]]\n else:\n messages = args[1][\"Entries\"]\n for message in messages:\n message[\"MessageAttributes\"] = message.get(\"MessageAttributes\") or {}\n if len(message[\"MessageAttributes\"]) + attributes_count <= SQS_MAX_ATTRIBUTES:\n message[\"MessageAttributes\"].update(attributes)\n else:\n logger.info(\"Not adding disttracing headers to message due to attribute limit reached\")\n elif operation_name == \"ReceiveMessage\":\n message_attributes = args[1].setdefault(\"MessageAttributeNames\", [])\n if \"All\" not in message_attributes:\n message_attributes.extend([constants.TRACEPARENT_HEADER_NAME, constants.TRACESTATE_HEADER_NAME])\n\n\ndef modify_span_sqs_post(span: SpanType, args, kwargs, result):\n operation_name = kwargs.get(\"operation_name\", args[0])\n if operation_name == \"ReceiveMessage\" and \"Messages\" in result:\n for message in result[\"Messages\"][:1000]: # only up to 1000 span links are recorded\n if \"MessageAttributes\" in message and constants.TRACEPARENT_HEADER_NAME in message[\"MessageAttributes\"]:\n tp = TraceParent.from_string(\n message[\"MessageAttributes\"][constants.TRACEPARENT_HEADER_NAME][\"StringValue\"]\n )\n span.add_link(tp)\n\n\ndef handle_default(operation_name, service, instance, args, kwargs, context):\n span_type = \"aws\"\n span_subtype = service.lower()\n span_action = operation_name\n\n context[\"destination\"][\"service\"] = {\"name\": span_subtype, \"resource\": span_subtype, \"type\": span_type}\n\n signature = f\"{service}:{operation_name}\"\n return HandlerInfo(signature, span_type, span_subtype, span_action, context)\n\n\nhandlers = {\n \"S3\": handle_s3,\n \"DynamoDB\": handle_dynamodb,\n \"SNS\": handle_sns,\n \"SQS\": handle_sqs,\n \"default\": handle_default,\n}\n\npre_span_modifiers = {\n \"SQS\": modify_span_sqs_pre,\n}\n\npost_span_modifiers = {\n \"SQS\": modify_span_sqs_post,\n}\n", "path": "elasticapm/instrumentation/packages/botocore.py"}]} | 3,836 | 284 |
gh_patches_debug_8310 | rasdani/github-patches | git_diff | automl__auto-sklearn-322 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Exception: batch_size must be a positive integer
I was running the following two line script:
`automl = autosklearn.classification.AutoSklearnClassifier()`
`automl.fit(X_train, y_train)`
on the Pima Indians Diabetes binary classification dataset, and it ended hours later with the following error:
```
.
.
.
You are already timing task: index_run28
You are already timing task: index_run28
You are already timing task: index_run28
You are already timing task: index_run28
You are already timing task: index_run28
You are already timing task: index_run28
You are already timing task: index_run28
You are already timing task: index_run28
You are already timing task: index_run28
You are already timing task: index_run28
You are already timing task: index_run28
You are already timing task: index_run28
You are already timing task: index_run28
You are already timing task: index_run28
[WARNING] [2017-06-05 23:16:26,928:smac.intensification.intensification.Intensifier] Challenger was the same as the current incumbent; Skipping challenger
[WARNING] [2017-06-05 23:16:26,928:smac.intensification.intensification.Intensifier] Challenger was the same as the current incumbent; Skipping challenger
You are already timing task: index_run28
Traceback (most recent call last):
File "auto-sklearn.py", line 21, in <module>
predictions = cls.predict(X, Y)
File "/usr/local/lib/python3.6/site-packages/autosklearn/estimators.py", line 421, in predict
X, batch_size=batch_size, n_jobs=n_jobs)
File "/usr/local/lib/python3.6/site-packages/autosklearn/estimators.py", line 61, in predict
return self._automl.predict(X, batch_size=batch_size, n_jobs=n_jobs)
File "/usr/local/lib/python3.6/site-packages/autosklearn/estimators.py", line 589, in predict
X, batch_size=batch_size, n_jobs=n_jobs)
File "/usr/local/lib/python3.6/site-packages/autosklearn/automl.py", line 545, in predict
for identifier in self.ensemble_.get_model_identifiers())
File "/usr/local/lib/python3.6/site-packages/sklearn/externals/joblib/parallel.py", line 758, in __call__
while self.dispatch_one_batch(iterator):
File "/usr/local/lib/python3.6/site-packages/sklearn/externals/joblib/parallel.py", line 608, in dispatch_one_batch
self._dispatch(tasks)
File "/usr/local/lib/python3.6/site-packages/sklearn/externals/joblib/parallel.py", line 571, in _dispatch
job = self._backend.apply_async(batch, callback=cb)
File "/usr/local/lib/python3.6/site-packages/sklearn/externals/joblib/_parallel_backends.py", line 109, in apply_async
result = ImmediateResult(func)
File "/usr/local/lib/python3.6/site-packages/sklearn/externals/joblib/_parallel_backends.py", line 326, in __init__
self.results = batch()
File "/usr/local/lib/python3.6/site-packages/sklearn/externals/joblib/parallel.py", line 131, in __call__
return [func(*args, **kwargs) for func, args, kwargs in self.items]
File "/usr/local/lib/python3.6/site-packages/sklearn/externals/joblib/parallel.py", line 131, in <listcomp>
return [func(*args, **kwargs) for func, args, kwargs in self.items]
File "/usr/local/lib/python3.6/site-packages/autosklearn/automl.py", line 45, in _model_predict
prediction = model.predict_proba(X_, batch_size=batch_size)
File "/usr/local/lib/python3.6/site-packages/autosklearn/pipeline/classification.py", line 131, in predict_proba
raise Exception("batch_size must be a positive integer")
Exception: batch_size must be a positive integer
```
</issue>
<code>
[start of example/example_metrics.py]
1 # -*- encoding: utf-8 -*-
2 import numpy as np
3
4 import sklearn.model_selection
5 import sklearn.datasets
6 import sklearn.metrics
7
8 import autosklearn.classification
9 import autosklearn.metrics
10
11
12
13 def accuracy(solution, prediction):
14 # function defining accuracy
15 return np.mean(solution == prediction)
16
17
18 def accuracy_wk(solution, prediction, dummy):
19 # function defining accuracy and accepting an additional argument
20 assert dummy is None
21 return np.mean(solution == prediction)
22
23
24 def main():
25
26 X, y = sklearn.datasets.load_breast_cancer(return_X_y=True)
27 X_train, X_test, y_train, y_test = \
28 sklearn.model_selection.train_test_split(X, y, random_state=1)
29
30 # Print a list of available metrics
31 print("Available CLASSIFICATION metrics autosklearn.metrics.*:")
32 print("\t*" + "\n\t*".join(autosklearn.metrics.CLASSIFICATION_METRICS))
33
34 print("Available REGRESSION autosklearn.metrics.*:")
35 print("\t*" + "\n\t*".join(autosklearn.metrics.REGRESSION_METRICS))
36
37 # First example: Use predefined accuracy metric
38 print("#"*80)
39 print("Use predefined accuracy metric")
40 cls = autosklearn.classification.\
41 AutoSklearnClassifier(time_left_for_this_task=60,
42 per_run_time_limit=30, seed=1)
43 cls.fit(X_train, y_train, metric=autosklearn.metrics.accuracy)
44
45 predictions = cls.predict(X_test)
46 print("Accuracy score {:g} using {:s}".
47 format(sklearn.metrics.accuracy_score(y_test, predictions),
48 cls._automl._automl._metric.name))
49
50 # Second example: Use own accuracy metric
51 print("#"*80)
52 print("Use self defined accuracy accuracy metric")
53 accuracy_scorer = autosklearn.metrics.make_scorer(name="accu",
54 score_func=accuracy,
55 greater_is_better=True,
56 needs_proba=False,
57 needs_threshold=False)
58 cls = autosklearn.classification.\
59 AutoSklearnClassifier(time_left_for_this_task=60,
60 per_run_time_limit=30, seed=1)
61 cls.fit(X_train, y_train, metric=accuracy_scorer)
62
63 predictions = cls.predict(X_test)
64 print("Accuracy score {:g} using {:s}".
65 format(sklearn.metrics.accuracy_score(y_test, predictions),
66 cls._automl._automl._metric.name))
67
68 # Third example: Use own accuracy metric with additional argument
69 print("#"*80)
70 print("Use self defined accuracy with additional argument")
71 accuracy_scorer = autosklearn.metrics.make_scorer(name="accu_add",
72 score_func=accuracy_wk,
73 greater_is_better=True,
74 needs_proba=False,
75 needs_threshold=False,
76 dummy=None)
77 cls = autosklearn.classification.\
78 AutoSklearnClassifier(time_left_for_this_task=60,
79 per_run_time_limit=30, seed=1)
80 cls.fit(X_train, y_train, metric=accuracy_scorer)
81
82 predictions = cls.predict(X_test)
83 print("Accuracy score {:g} using {:s}".
84 format(sklearn.metrics.accuracy_score(y_test, predictions),
85 cls._automl._automl._metric.name))
86
87
88 if __name__ == "__main__":
89 main()
90
[end of example/example_metrics.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/example/example_metrics.py b/example/example_metrics.py
--- a/example/example_metrics.py
+++ b/example/example_metrics.py
@@ -49,7 +49,7 @@
# Second example: Use own accuracy metric
print("#"*80)
- print("Use self defined accuracy accuracy metric")
+ print("Use self defined accuracy metric")
accuracy_scorer = autosklearn.metrics.make_scorer(name="accu",
score_func=accuracy,
greater_is_better=True,
| {"golden_diff": "diff --git a/example/example_metrics.py b/example/example_metrics.py\n--- a/example/example_metrics.py\n+++ b/example/example_metrics.py\n@@ -49,7 +49,7 @@\n \n # Second example: Use own accuracy metric\n print(\"#\"*80)\n- print(\"Use self defined accuracy accuracy metric\")\n+ print(\"Use self defined accuracy metric\")\n accuracy_scorer = autosklearn.metrics.make_scorer(name=\"accu\",\n score_func=accuracy,\n greater_is_better=True,\n", "issue": "Exception: batch_size must be a positive integer\nI was running the following two line script:\r\n\r\n`automl = autosklearn.classification.AutoSklearnClassifier()`\r\n`automl.fit(X_train, y_train)`\r\n\r\non the Pima Indians Diabetes binary classification dataset, and it ended hours later with the following error:\r\n\r\n\r\n\r\n```\r\n.\r\n.\r\n.\r\nYou are already timing task: index_run28\r\nYou are already timing task: index_run28\r\nYou are already timing task: index_run28\r\nYou are already timing task: index_run28\r\nYou are already timing task: index_run28\r\nYou are already timing task: index_run28\r\nYou are already timing task: index_run28\r\nYou are already timing task: index_run28\r\nYou are already timing task: index_run28\r\nYou are already timing task: index_run28\r\nYou are already timing task: index_run28\r\nYou are already timing task: index_run28\r\nYou are already timing task: index_run28\r\nYou are already timing task: index_run28\r\n[WARNING] [2017-06-05 23:16:26,928:smac.intensification.intensification.Intensifier] Challenger was the same as the current incumbent; Skipping challenger\r\n[WARNING] [2017-06-05 23:16:26,928:smac.intensification.intensification.Intensifier] Challenger was the same as the current incumbent; Skipping challenger\r\nYou are already timing task: index_run28\r\nTraceback (most recent call last):\r\n File \"auto-sklearn.py\", line 21, in <module>\r\n predictions = cls.predict(X, Y)\r\n File \"/usr/local/lib/python3.6/site-packages/autosklearn/estimators.py\", line 421, in predict\r\n X, batch_size=batch_size, n_jobs=n_jobs)\r\n File \"/usr/local/lib/python3.6/site-packages/autosklearn/estimators.py\", line 61, in predict\r\n return self._automl.predict(X, batch_size=batch_size, n_jobs=n_jobs)\r\n File \"/usr/local/lib/python3.6/site-packages/autosklearn/estimators.py\", line 589, in predict\r\n X, batch_size=batch_size, n_jobs=n_jobs)\r\n File \"/usr/local/lib/python3.6/site-packages/autosklearn/automl.py\", line 545, in predict\r\n for identifier in self.ensemble_.get_model_identifiers())\r\n File \"/usr/local/lib/python3.6/site-packages/sklearn/externals/joblib/parallel.py\", line 758, in __call__\r\n while self.dispatch_one_batch(iterator):\r\n File \"/usr/local/lib/python3.6/site-packages/sklearn/externals/joblib/parallel.py\", line 608, in dispatch_one_batch\r\n self._dispatch(tasks)\r\n File \"/usr/local/lib/python3.6/site-packages/sklearn/externals/joblib/parallel.py\", line 571, in _dispatch\r\n job = self._backend.apply_async(batch, callback=cb)\r\n File \"/usr/local/lib/python3.6/site-packages/sklearn/externals/joblib/_parallel_backends.py\", line 109, in apply_async\r\n result = ImmediateResult(func)\r\n File \"/usr/local/lib/python3.6/site-packages/sklearn/externals/joblib/_parallel_backends.py\", line 326, in __init__\r\n self.results = batch()\r\n File \"/usr/local/lib/python3.6/site-packages/sklearn/externals/joblib/parallel.py\", line 131, in __call__\r\n return [func(*args, **kwargs) for func, args, kwargs in self.items]\r\n File 
\"/usr/local/lib/python3.6/site-packages/sklearn/externals/joblib/parallel.py\", line 131, in <listcomp>\r\n return [func(*args, **kwargs) for func, args, kwargs in self.items]\r\n File \"/usr/local/lib/python3.6/site-packages/autosklearn/automl.py\", line 45, in _model_predict\r\n prediction = model.predict_proba(X_, batch_size=batch_size)\r\n File \"/usr/local/lib/python3.6/site-packages/autosklearn/pipeline/classification.py\", line 131, in predict_proba\r\n raise Exception(\"batch_size must be a positive integer\")\r\nException: batch_size must be a positive integer\r\n```\r\n\r\n\n", "before_files": [{"content": "# -*- encoding: utf-8 -*-\nimport numpy as np\n\nimport sklearn.model_selection\nimport sklearn.datasets\nimport sklearn.metrics\n\nimport autosklearn.classification\nimport autosklearn.metrics\n\n\n\ndef accuracy(solution, prediction):\n # function defining accuracy\n return np.mean(solution == prediction)\n\n\ndef accuracy_wk(solution, prediction, dummy):\n # function defining accuracy and accepting an additional argument\n assert dummy is None\n return np.mean(solution == prediction)\n\n\ndef main():\n\n X, y = sklearn.datasets.load_breast_cancer(return_X_y=True)\n X_train, X_test, y_train, y_test = \\\n sklearn.model_selection.train_test_split(X, y, random_state=1)\n\n # Print a list of available metrics\n print(\"Available CLASSIFICATION metrics autosklearn.metrics.*:\")\n print(\"\\t*\" + \"\\n\\t*\".join(autosklearn.metrics.CLASSIFICATION_METRICS))\n\n print(\"Available REGRESSION autosklearn.metrics.*:\")\n print(\"\\t*\" + \"\\n\\t*\".join(autosklearn.metrics.REGRESSION_METRICS))\n\n # First example: Use predefined accuracy metric\n print(\"#\"*80)\n print(\"Use predefined accuracy metric\")\n cls = autosklearn.classification.\\\n AutoSklearnClassifier(time_left_for_this_task=60,\n per_run_time_limit=30, seed=1)\n cls.fit(X_train, y_train, metric=autosklearn.metrics.accuracy)\n\n predictions = cls.predict(X_test)\n print(\"Accuracy score {:g} using {:s}\".\n format(sklearn.metrics.accuracy_score(y_test, predictions),\n cls._automl._automl._metric.name))\n\n # Second example: Use own accuracy metric\n print(\"#\"*80)\n print(\"Use self defined accuracy accuracy metric\")\n accuracy_scorer = autosklearn.metrics.make_scorer(name=\"accu\",\n score_func=accuracy,\n greater_is_better=True,\n needs_proba=False,\n needs_threshold=False)\n cls = autosklearn.classification.\\\n AutoSklearnClassifier(time_left_for_this_task=60,\n per_run_time_limit=30, seed=1)\n cls.fit(X_train, y_train, metric=accuracy_scorer)\n\n predictions = cls.predict(X_test)\n print(\"Accuracy score {:g} using {:s}\".\n format(sklearn.metrics.accuracy_score(y_test, predictions),\n cls._automl._automl._metric.name))\n\n # Third example: Use own accuracy metric with additional argument\n print(\"#\"*80)\n print(\"Use self defined accuracy with additional argument\")\n accuracy_scorer = autosklearn.metrics.make_scorer(name=\"accu_add\",\n score_func=accuracy_wk,\n greater_is_better=True,\n needs_proba=False,\n needs_threshold=False,\n dummy=None)\n cls = autosklearn.classification.\\\n AutoSklearnClassifier(time_left_for_this_task=60,\n per_run_time_limit=30, seed=1)\n cls.fit(X_train, y_train, metric=accuracy_scorer)\n\n predictions = cls.predict(X_test)\n print(\"Accuracy score {:g} using {:s}\".\n format(sklearn.metrics.accuracy_score(y_test, predictions),\n cls._automl._automl._metric.name))\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "example/example_metrics.py"}]} | 2,405 | 111 |
gh_patches_debug_51622 | rasdani/github-patches | git_diff | akvo__akvo-rsr-3604 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Organisation report shown in project reports page
The "Project overview" report is displayed on the project report page, which is an organisation report and should not be displayed on the project report page.
</issue>
<code>
[start of akvo/rest/views/report.py]
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7 from django.db.models import Q
8 from django.shortcuts import get_object_or_404
9 from rest_framework import status
10 from rest_framework.decorators import api_view
11 from rest_framework.response import Response
12
13 from akvo.rsr.models import Report, ReportFormat, Project
14 from ..serializers import ReportSerializer, ReportFormatSerializer
15 from ..viewsets import BaseRSRViewSet
16
17
18 class ReportViewSet(BaseRSRViewSet):
19 """Viewset providing Result data."""
20
21 queryset = Report.objects.prefetch_related(
22 'organisations',
23 'formats',
24 )
25 serializer_class = ReportSerializer
26
27 def get_queryset(self):
28 """
29 Allow custom filter for sync_owner, since this field has been replaced by the
30 reporting org partnership.
31 """
32 reports = super(ReportViewSet, self).get_queryset()
33 user = self.request.user
34 is_admin = user.is_active and (user.is_superuser or user.is_admin)
35 if not is_admin:
36 # Show only those reports that the user is allowed to see
37 approved_orgs = user.approved_organisations() if not user.is_anonymous() else []
38 reports = reports.filter(
39 Q(organisations=None) | Q(organisations__in=approved_orgs)
40 ).distinct()
41 return reports
42
43
44 @api_view(['GET'])
45 def report_formats(request):
46 """
47 A view for displaying all report format information.
48 """
49 return Response({
50 'count': ReportFormat.objects.all().count(),
51 'results': [ReportFormatSerializer(f).data for f in ReportFormat.objects.all()],
52 })
53
54
55 @api_view(['GET'])
56 def project_reports(request, project_pk):
57 """A view for displaying project specific reports."""
58
59 project = get_object_or_404(Project, pk=project_pk)
60 reports = Report.objects.prefetch_related('formats', 'organisations')\
61 .filter(url__icontains='project')
62
63 user = request.user
64 if not user.has_perm('rsr.view_project', project):
65 return Response('Request not allowed', status=status.HTTP_403_FORBIDDEN)
66
67 is_admin = user.is_active and (user.is_superuser or user.is_admin)
68
69 if not is_admin:
70 partners_org = project.partner_organisation_pks()
71 reports = reports.filter(
72 Q(organisations=None) | Q(organisations__in=partners_org)
73 )
74
75 serializer = ReportSerializer(reports.distinct(), many=True)
76 return Response(serializer.data)
77
[end of akvo/rest/views/report.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/akvo/rest/views/report.py b/akvo/rest/views/report.py
--- a/akvo/rest/views/report.py
+++ b/akvo/rest/views/report.py
@@ -58,7 +58,7 @@
project = get_object_or_404(Project, pk=project_pk)
reports = Report.objects.prefetch_related('formats', 'organisations')\
- .filter(url__icontains='project')
+ .filter(url__icontains='{project}')
user = request.user
if not user.has_perm('rsr.view_project', project):
| {"golden_diff": "diff --git a/akvo/rest/views/report.py b/akvo/rest/views/report.py\n--- a/akvo/rest/views/report.py\n+++ b/akvo/rest/views/report.py\n@@ -58,7 +58,7 @@\n \n project = get_object_or_404(Project, pk=project_pk)\n reports = Report.objects.prefetch_related('formats', 'organisations')\\\n- .filter(url__icontains='project')\n+ .filter(url__icontains='{project}')\n \n user = request.user\n if not user.has_perm('rsr.view_project', project):\n", "issue": "Organisation report shown in project reports page\nThe \"Project overview\" report is displayed on the project report page, which is an organisation report and should not be displayed on the project report page.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom django.db.models import Q\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\nfrom akvo.rsr.models import Report, ReportFormat, Project\nfrom ..serializers import ReportSerializer, ReportFormatSerializer\nfrom ..viewsets import BaseRSRViewSet\n\n\nclass ReportViewSet(BaseRSRViewSet):\n \"\"\"Viewset providing Result data.\"\"\"\n\n queryset = Report.objects.prefetch_related(\n 'organisations',\n 'formats',\n )\n serializer_class = ReportSerializer\n\n def get_queryset(self):\n \"\"\"\n Allow custom filter for sync_owner, since this field has been replaced by the\n reporting org partnership.\n \"\"\"\n reports = super(ReportViewSet, self).get_queryset()\n user = self.request.user\n is_admin = user.is_active and (user.is_superuser or user.is_admin)\n if not is_admin:\n # Show only those reports that the user is allowed to see\n approved_orgs = user.approved_organisations() if not user.is_anonymous() else []\n reports = reports.filter(\n Q(organisations=None) | Q(organisations__in=approved_orgs)\n ).distinct()\n return reports\n\n\n@api_view(['GET'])\ndef report_formats(request):\n \"\"\"\n A view for displaying all report format information.\n \"\"\"\n return Response({\n 'count': ReportFormat.objects.all().count(),\n 'results': [ReportFormatSerializer(f).data for f in ReportFormat.objects.all()],\n })\n\n\n@api_view(['GET'])\ndef project_reports(request, project_pk):\n \"\"\"A view for displaying project specific reports.\"\"\"\n\n project = get_object_or_404(Project, pk=project_pk)\n reports = Report.objects.prefetch_related('formats', 'organisations')\\\n .filter(url__icontains='project')\n\n user = request.user\n if not user.has_perm('rsr.view_project', project):\n return Response('Request not allowed', status=status.HTTP_403_FORBIDDEN)\n\n is_admin = user.is_active and (user.is_superuser or user.is_admin)\n\n if not is_admin:\n partners_org = project.partner_organisation_pks()\n reports = reports.filter(\n Q(organisations=None) | Q(organisations__in=partners_org)\n )\n\n serializer = ReportSerializer(reports.distinct(), many=True)\n return Response(serializer.data)\n", "path": "akvo/rest/views/report.py"}]} | 1,313 | 127 |
gh_patches_debug_19318 | rasdani/github-patches | git_diff | lightly-ai__lightly-587 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
lightly-download fails for integer tag names
lightly-download fails for integer tag names
For the `tag_name` `1000`, the following warning appears:
> Possible bug: I get `warnings.warn(f'The specified tag {tag_name} does not exist`
The source of the problem is probably in this line:
https://github.com/lightly-ai/lightly/blob/db33b15de6f77e50b0c815c4c405a8fb371d22e7/lightly/cli/download_cli.py#L44
Current guess: either the API sends the tag name as a number or the command-line tool parses it as a number, which makes the lookup fail.
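
For illustration, a minimal sketch of the suspected type mismatch (values are hypothetical):

```python
# Tag names returned by the API are strings.
tag_name_id_dict = {"1000": "5f2c...", "initial-tag": "5f2d..."}

# An unquoted CLI override like `tag_name=1000` is parsed as an int by Hydra,
# so the lookup key has the wrong type and the tag appears to be missing.
tag_name = 1000
tag_name_id_dict.get(tag_name)       # None -> "The specified tag 1000 does not exist."
tag_name_id_dict.get(str(tag_name))  # "5f2c..."
```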
</issue>
<code>
[start of lightly/cli/download_cli.py]
1 # -*- coding: utf-8 -*-
2 """**Lightly Download:** Download images from the Lightly platform.
3
4 This module contains the entrypoint for the **lightly-download**
5 command-line interface.
6 """
7
8 # Copyright (c) 2020. Lightly AG and its affiliates.
9 # All Rights Reserved
10
11 import os
12 import shutil
13 import warnings
14
15 import hydra
16 from torch.utils.hipify.hipify_python import bcolors
17 from tqdm import tqdm
18
19 import lightly.data as data
20 from lightly.cli._helpers import fix_input_path, print_as_warning
21
22 from lightly.api.utils import getenv
23 from lightly.api.api_workflow_client import ApiWorkflowClient
24 from lightly.api.bitmask import BitMask
25 from lightly.openapi_generated.swagger_client import TagData, TagArithmeticsRequest, TagArithmeticsOperation, \
26 TagBitMaskResponse
27
28
29 def _download_cli(cfg, is_cli_call=True):
30 tag_name = cfg['tag_name']
31 dataset_id = cfg['dataset_id']
32 token = cfg['token']
33
34 if not tag_name or not token or not dataset_id:
35 print_as_warning('Please specify all of the parameters tag_name, token and dataset_id')
36 print_as_warning('For help, try: lightly-download --help')
37 return
38
39 api_workflow_client = ApiWorkflowClient(
40 token=token, dataset_id=dataset_id
41 )
42
43 # get tag id
44 tag_name_id_dict = dict([tag.name, tag.id] for tag in api_workflow_client._get_all_tags())
45 tag_id = tag_name_id_dict.get(tag_name, None)
46 if tag_id is None:
47 warnings.warn(f'The specified tag {tag_name} does not exist.')
48 return
49
50 # get tag data
51 tag_data: TagData = api_workflow_client.tags_api.get_tag_by_tag_id(
52 dataset_id=dataset_id, tag_id=tag_id
53 )
54
55 if cfg["exclude_parent_tag"]:
56 parent_tag_id = tag_data.prev_tag_id
57 tag_arithmetics_request = TagArithmeticsRequest(
58 tag_id1=tag_data.id,
59 tag_id2=parent_tag_id,
60 operation=TagArithmeticsOperation.DIFFERENCE)
61 bit_mask_response: TagBitMaskResponse \
62 = api_workflow_client.tags_api.perform_tag_arithmetics(body=tag_arithmetics_request, dataset_id=dataset_id)
63 bit_mask_data = bit_mask_response.bit_mask_data
64 else:
65 bit_mask_data = tag_data.bit_mask_data
66
67 # get samples
68 chosen_samples_ids = BitMask.from_hex(bit_mask_data).to_indices()
69 samples = [api_workflow_client.filenames_on_server[i] for i in chosen_samples_ids]
70
71 # store sample names in a .txt file
72 filename = cfg['tag_name'] + '.txt'
73 with open(filename, 'w') as f:
74 for item in samples:
75 f.write("%s\n" % item)
76
77 filepath = os.path.join(os.getcwd(), filename)
78 msg = f'The list of files in tag {cfg["tag_name"]} is stored at: {bcolors.OKBLUE}{filepath}{bcolors.ENDC}'
79 print(msg, flush=True)
80
81 if not cfg['input_dir'] and cfg['output_dir']:
82 # download full images from api
83 output_dir = fix_input_path(cfg['output_dir'])
84 api_workflow_client.download_dataset(output_dir, tag_name=tag_name)
85
86 elif cfg['input_dir'] and cfg['output_dir']:
87 input_dir = fix_input_path(cfg['input_dir'])
88 output_dir = fix_input_path(cfg['output_dir'])
89 print(f'Copying files from {input_dir} to {bcolors.OKBLUE}{output_dir}{bcolors.ENDC}.')
90
91 # create a dataset from the input directory
92 dataset = data.LightlyDataset(input_dir=input_dir)
93
94 # dump the dataset in the output directory
95 dataset.dump(output_dir, samples)
96
97
98 @hydra.main(config_path='config', config_name='config')
99 def download_cli(cfg):
100 """Download images from the Lightly platform.
101
102 Args:
103 cfg:
104 The default configs are loaded from the config file.
105 To overwrite them please see the section on the config file
106 (.config.config.yaml).
107
108 Command-Line Args:
109 tag_name:
110 Download all images from the requested tag. Use initial-tag
111 to get all images from the dataset.
112 token:
113 User access token to the Lightly platform. If dataset_id
114 and token are specified, the images and embeddings are
115 uploaded to the platform.
116 dataset_id:
117 Identifier of the dataset on the Lightly platform. If
118 dataset_id and token are specified, the images and
119 embeddings are uploaded to the platform.
120 input_dir:
121 If input_dir and output_dir are specified, lightly will copy
122 all images belonging to the tag from the input_dir to the
123 output_dir.
124 output_dir:
125 If input_dir and output_dir are specified, lightly will copy
126 all images belonging to the tag from the input_dir to the
127 output_dir.
128
129 Examples:
130 >>> # download list of all files in the dataset from the Lightly platform
131 >>> lightly-download token='123' dataset_id='XYZ'
132 >>>
133 >>> # download list of all files in tag 'my-tag' from the Lightly platform
134 >>> lightly-download token='123' dataset_id='XYZ' tag_name='my-tag'
135 >>>
136 >>> # download all images in tag 'my-tag' from the Lightly platform
137 >>> lightly-download token='123' dataset_id='XYZ' tag_name='my-tag' output_dir='my_data/'
138 >>>
139 >>> # copy all files in 'my-tag' to a new directory
140 >>> lightly-download token='123' dataset_id='XYZ' tag_name='my-tag' input_dir='data/' output_dir='my_data/'
141
142
143 """
144 _download_cli(cfg)
145
146
147 def entry():
148 download_cli()
149
[end of lightly/cli/download_cli.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lightly/cli/download_cli.py b/lightly/cli/download_cli.py
--- a/lightly/cli/download_cli.py
+++ b/lightly/cli/download_cli.py
@@ -27,9 +27,10 @@
def _download_cli(cfg, is_cli_call=True):
- tag_name = cfg['tag_name']
- dataset_id = cfg['dataset_id']
- token = cfg['token']
+
+ tag_name = str(cfg['tag_name'])
+ dataset_id = str(cfg['dataset_id'])
+ token = str(cfg['token'])
if not tag_name or not token or not dataset_id:
print_as_warning('Please specify all of the parameters tag_name, token and dataset_id')
@@ -69,7 +70,7 @@
samples = [api_workflow_client.filenames_on_server[i] for i in chosen_samples_ids]
# store sample names in a .txt file
- filename = cfg['tag_name'] + '.txt'
+ filename = tag_name + '.txt'
with open(filename, 'w') as f:
for item in samples:
f.write("%s\n" % item)
| {"golden_diff": "diff --git a/lightly/cli/download_cli.py b/lightly/cli/download_cli.py\n--- a/lightly/cli/download_cli.py\n+++ b/lightly/cli/download_cli.py\n@@ -27,9 +27,10 @@\n \n \n def _download_cli(cfg, is_cli_call=True):\n- tag_name = cfg['tag_name']\n- dataset_id = cfg['dataset_id']\n- token = cfg['token']\n+\n+ tag_name = str(cfg['tag_name'])\n+ dataset_id = str(cfg['dataset_id'])\n+ token = str(cfg['token'])\n \n if not tag_name or not token or not dataset_id:\n print_as_warning('Please specify all of the parameters tag_name, token and dataset_id')\n@@ -69,7 +70,7 @@\n samples = [api_workflow_client.filenames_on_server[i] for i in chosen_samples_ids]\n \n # store sample names in a .txt file\n- filename = cfg['tag_name'] + '.txt'\n+ filename = tag_name + '.txt'\n with open(filename, 'w') as f:\n for item in samples:\n f.write(\"%s\\n\" % item)\n", "issue": "lightly-download fails for integer tag names\nlightly-download fails for integer tag names\r\n\r\nFor the `tag_name` `1000`, the following warning appears:\r\n> Possible bug: I get `warnings.warn(f'The specified tag {tag_name} does not exist`\r\n\r\nThe source of the problem is probably in this line:\r\nhttps://github.com/lightly-ai/lightly/blob/db33b15de6f77e50b0c815c4c405a8fb371d22e7/lightly/cli/download_cli.py#L44\r\n\r\nCurrent guess: Either the api sends the string as a number or the command-line tool parses the string as a number which makes the lookup fail.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"**Lightly Download:** Download images from the Lightly platform.\n\nThis module contains the entrypoint for the **lightly-download**\ncommand-line interface.\n\"\"\"\n\n# Copyright (c) 2020. Lightly AG and its affiliates.\n# All Rights Reserved\n\nimport os\nimport shutil\nimport warnings\n\nimport hydra\nfrom torch.utils.hipify.hipify_python import bcolors\nfrom tqdm import tqdm\n\nimport lightly.data as data\nfrom lightly.cli._helpers import fix_input_path, print_as_warning\n\nfrom lightly.api.utils import getenv\nfrom lightly.api.api_workflow_client import ApiWorkflowClient\nfrom lightly.api.bitmask import BitMask\nfrom lightly.openapi_generated.swagger_client import TagData, TagArithmeticsRequest, TagArithmeticsOperation, \\\n TagBitMaskResponse\n\n\ndef _download_cli(cfg, is_cli_call=True):\n tag_name = cfg['tag_name']\n dataset_id = cfg['dataset_id']\n token = cfg['token']\n\n if not tag_name or not token or not dataset_id:\n print_as_warning('Please specify all of the parameters tag_name, token and dataset_id')\n print_as_warning('For help, try: lightly-download --help')\n return\n\n api_workflow_client = ApiWorkflowClient(\n token=token, dataset_id=dataset_id\n )\n\n # get tag id\n tag_name_id_dict = dict([tag.name, tag.id] for tag in api_workflow_client._get_all_tags())\n tag_id = tag_name_id_dict.get(tag_name, None)\n if tag_id is None:\n warnings.warn(f'The specified tag {tag_name} does not exist.')\n return\n\n # get tag data\n tag_data: TagData = api_workflow_client.tags_api.get_tag_by_tag_id(\n dataset_id=dataset_id, tag_id=tag_id\n )\n\n if cfg[\"exclude_parent_tag\"]:\n parent_tag_id = tag_data.prev_tag_id\n tag_arithmetics_request = TagArithmeticsRequest(\n tag_id1=tag_data.id,\n tag_id2=parent_tag_id,\n operation=TagArithmeticsOperation.DIFFERENCE)\n bit_mask_response: TagBitMaskResponse \\\n = api_workflow_client.tags_api.perform_tag_arithmetics(body=tag_arithmetics_request, dataset_id=dataset_id)\n bit_mask_data = bit_mask_response.bit_mask_data\n else:\n bit_mask_data = 
tag_data.bit_mask_data\n\n # get samples\n chosen_samples_ids = BitMask.from_hex(bit_mask_data).to_indices()\n samples = [api_workflow_client.filenames_on_server[i] for i in chosen_samples_ids]\n\n # store sample names in a .txt file\n filename = cfg['tag_name'] + '.txt'\n with open(filename, 'w') as f:\n for item in samples:\n f.write(\"%s\\n\" % item)\n\n filepath = os.path.join(os.getcwd(), filename)\n msg = f'The list of files in tag {cfg[\"tag_name\"]} is stored at: {bcolors.OKBLUE}{filepath}{bcolors.ENDC}'\n print(msg, flush=True)\n\n if not cfg['input_dir'] and cfg['output_dir']:\n # download full images from api\n output_dir = fix_input_path(cfg['output_dir'])\n api_workflow_client.download_dataset(output_dir, tag_name=tag_name)\n\n elif cfg['input_dir'] and cfg['output_dir']:\n input_dir = fix_input_path(cfg['input_dir'])\n output_dir = fix_input_path(cfg['output_dir'])\n print(f'Copying files from {input_dir} to {bcolors.OKBLUE}{output_dir}{bcolors.ENDC}.')\n\n # create a dataset from the input directory\n dataset = data.LightlyDataset(input_dir=input_dir)\n\n # dump the dataset in the output directory\n dataset.dump(output_dir, samples)\n\n\[email protected](config_path='config', config_name='config')\ndef download_cli(cfg):\n \"\"\"Download images from the Lightly platform.\n\n Args:\n cfg:\n The default configs are loaded from the config file.\n To overwrite them please see the section on the config file \n (.config.config.yaml).\n \n Command-Line Args:\n tag_name:\n Download all images from the requested tag. Use initial-tag\n to get all images from the dataset.\n token:\n User access token to the Lightly platform. If dataset_id\n and token are specified, the images and embeddings are \n uploaded to the platform.\n dataset_id:\n Identifier of the dataset on the Lightly platform. If \n dataset_id and token are specified, the images and \n embeddings are uploaded to the platform.\n input_dir:\n If input_dir and output_dir are specified, lightly will copy\n all images belonging to the tag from the input_dir to the \n output_dir.\n output_dir:\n If input_dir and output_dir are specified, lightly will copy\n all images belonging to the tag from the input_dir to the \n output_dir.\n\n Examples:\n >>> #\u00a0download list of all files in the dataset from the Lightly platform\n >>> lightly-download token='123' dataset_id='XYZ'\n >>> \n >>> # download list of all files in tag 'my-tag' from the Lightly platform\n >>> lightly-download token='123' dataset_id='XYZ' tag_name='my-tag'\n >>>\n >>> # download all images in tag 'my-tag' from the Lightly platform\n >>> lightly-download token='123' dataset_id='XYZ' tag_name='my-tag' output_dir='my_data/'\n >>>\n >>> # copy all files in 'my-tag' to a new directory\n >>> lightly-download token='123' dataset_id='XYZ' tag_name='my-tag' input_dir='data/' output_dir='my_data/'\n\n\n \"\"\"\n _download_cli(cfg)\n\n\ndef entry():\n download_cli()\n", "path": "lightly/cli/download_cli.py"}]} | 2,300 | 248 |
gh_patches_debug_40940 | rasdani/github-patches | git_diff | microsoft__MLOS-438 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Move to new version json schema reference resolution
The latest version 4.18.0 of jsonschema migrated to a new way of handling schema reference resolution that breaks our current code. Will need to adjust it, but for now I've pinned it to the old version in #436
See Also:
https://python-jsonschema.readthedocs.io/en/stable/referencing/#migrating-from-refresolver
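
For context, a minimal sketch of the replacement API (the `referencing` package; the schema contents here are placeholders):

```python
import jsonschema
from referencing import Registry, Resource
from referencing.jsonschema import DRAFT202012

base = {"$id": "https://example.com/base.json", "type": "object"}
schema = {"$id": "https://example.com/config.json", "$ref": "https://example.com/base.json"}

# Register subschemas by their $id so $ref resolution works without RefResolver.
registry = Registry().with_resources([
    (base["$id"], Resource.from_contents(base, default_specification=DRAFT202012)),
])
jsonschema.Draft202012Validator(schema=schema, registry=registry).validate({})
```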
</issue>
<code>
[start of mlos_bench/setup.py]
1 #
2 # Copyright (c) Microsoft Corporation.
3 # Licensed under the MIT License.
4 #
5 """
6 Setup instructions for the mlos_bench package.
7 """
8
9 from logging import warning
10 from itertools import chain
11 from typing import Dict, List
12
13 from setuptools import setup, find_packages
14
15 from _version import _VERSION # pylint: disable=import-private-name
16
17 try:
18 from setuptools_scm import get_version
19 version = get_version(root='..', relative_to=__file__)
20 if version is not None:
21 _VERSION = version # noqa: F811
22 except ImportError:
23 warning("setuptools_scm not found, using version from _version.py")
24 except LookupError as e:
25 warning(f"setuptools_scm failed to find git version, using version from _version.py: {e}")
26
27
28 extra_requires: Dict[str, List[str]] = { # pylint: disable=consider-using-namedtuple-or-dataclass
29 # Additional tools for extra functionality.
30 'azure': ['azure-storage-file-share'],
31 'storage-sql-duckdb': ['sqlalchemy', 'duckdb_engine'],
32 'storage-sql-mysql': ['sqlalchemy', 'mysql-connector-python'],
33 'storage-sql-postgres': ['sqlalchemy', 'psycopg2'],
34 'storage-sql-sqlite': ['sqlalchemy'], # sqlite3 comes with python, so we don't need to install it.
35 # Transitive extra_requires from mlos-core.
36 'flaml': ['flaml[blendsearch]'],
37 'smac': ['smac'],
38 }
39
40 # construct special 'full' extra that adds requirements for all built-in
41 # backend integrations and additional extra features.
42 extra_requires['full'] = list(set(chain(*extra_requires.values())))
43
44 extra_requires['full-tests'] = extra_requires['full'] + [
45 'pytest',
46 'pytest-forked',
47 'pytest-xdist',
48 'pytest-cov',
49 'pytest-local-badge',
50 ]
51
52 # pylint: disable=duplicate-code
53 MODULE_BASE_NAME = 'mlos_bench'
54 setup(
55 name='mlos-bench',
56 version=_VERSION,
57 packages=find_packages(exclude=[f"{MODULE_BASE_NAME}.tests", f"{MODULE_BASE_NAME}.tests.*"]),
58 package_data={
59 '': ['py.typed', '**/*.pyi'],
60 'mlos_bench': [
61 'config/**/*.md',
62 'config/**/*.jsonc',
63 'config/**/*.json',
64 'config/**/*.py',
65 'config/**/*.sh',
66 'config/**/*.cmd',
67 'config/**/*.ps1',
68 ],
69 },
70 entry_points={
71 'console_scripts': [
72 'mlos_bench = mlos_bench.run:_main',
73 ],
74 },
75 install_requires=[
76 'mlos-core==' + _VERSION,
77 'requests',
78 'json5',
79 'jsonschema',
80 'importlib_resources;python_version<"3.10"',
81 ] + extra_requires['storage-sql-sqlite'], # NOTE: For now sqlite is a fallback storage backend, so we always install it.
82 extras_require=extra_requires,
83 author='Microsoft',
84 author_email='[email protected]',
85 description=('MLOS Bench Python interface for benchmark automation and optimization.'),
86 license='MIT',
87 keywords='',
88 url='https://aka.ms/mlos-core',
89 python_requires='>=3.8',
90 )
91
[end of mlos_bench/setup.py]
[start of mlos_bench/mlos_bench/config/schemas/config_schemas.py]
1 #
2 # Copyright (c) Microsoft Corporation.
3 # Licensed under the MIT License.
4 #
5 """
6 A simple class for describing where to find different config schemas and validating configs against them.
7 """
8
9 import logging
10 from enum import Enum
11 from os import path, walk, environ
12 from typing import Dict, Iterator, Mapping
13
14 import json # schema files are pure json - no comments
15 import jsonschema
16
17 from mlos_bench.util import path_join
18
19 _LOG = logging.getLogger(__name__)
20
21 # The path to find all config schemas.
22 CONFIG_SCHEMA_DIR = path_join(path.dirname(__file__), abs_path=True)
23
24 # Allow skipping schema validation for tight dev cycle changes.
25 # It is used in `ConfigSchema.validate()` method below.
26 # NOTE: this may cause pytest to fail if it's expecting exceptions
27 # to be raised for invalid configs.
28 _VALIDATION_ENV_FLAG = 'MLOS_BENCH_SKIP_SCHEMA_VALIDATION'
29 _SKIP_VALIDATION = (environ.get(_VALIDATION_ENV_FLAG, 'false').lower()
30 in {'true', 'y', 'yes', 'on', '1'})
31
32
33 # Note: we separate out the SchemaStore from a class method on ConfigSchema
34 # because of issues with mypy/pylint and non-Enum-member class members.
35 class SchemaStore(Mapping):
36 """
37 A simple class for storing schemas and subschemas for the validator to reference.
38 """
39
40 # A class member mapping of schema id to schema object.
41 _SCHEMA_STORE: Dict[str, dict] = {}
42
43 def __len__(self) -> int:
44 return self._SCHEMA_STORE.__len__()
45
46 def __iter__(self) -> Iterator:
47 return self._SCHEMA_STORE.__iter__()
48
49 def __getitem__(self, key: str) -> dict:
50 """Gets the schema object for the given key."""
51 if not self._SCHEMA_STORE:
52 self._load_schemas()
53 return self._SCHEMA_STORE[key]
54
55 @classmethod
56 def _load_schemas(cls) -> None:
57 """Loads all schemas and subschemas into the schema store for the validator to reference."""
58 for root, _, files in walk(CONFIG_SCHEMA_DIR):
59 for file_name in files:
60 if not file_name.endswith(".json"):
61 continue
62 file_path = path_join(root, file_name)
63 if path.getsize(file_path) == 0:
64 continue
65 with open(file_path, mode="r", encoding="utf-8") as schema_file:
66 schema = json.load(schema_file)
67 cls._SCHEMA_STORE[file_path] = schema
68 # Let the schema be referenced by its id as well.
69 assert "$id" in schema
70 assert schema["$id"] not in cls._SCHEMA_STORE
71 cls._SCHEMA_STORE[schema["$id"]] = schema
72
73
74 SCHEMA_STORE = SchemaStore()
75
76
77 class ConfigSchema(Enum):
78 """
79 An enum to help describe schema types and help validate configs against them.
80 """
81
82 CLI = path_join(CONFIG_SCHEMA_DIR, "cli/cli-schema.json")
83 GLOBALS = path_join(CONFIG_SCHEMA_DIR, "cli/globals-schema.json")
84 ENVIRONMENT = path_join(CONFIG_SCHEMA_DIR, "environments/environment-schema.json")
85 OPTIMIZER = path_join(CONFIG_SCHEMA_DIR, "optimizers/optimizer-schema.json")
86 SERVICE = path_join(CONFIG_SCHEMA_DIR, "services/service-schema.json")
87 STORAGE = path_join(CONFIG_SCHEMA_DIR, "storage/storage-schema.json")
88 TUNABLE_PARAMS = path_join(CONFIG_SCHEMA_DIR, "tunables/tunable-params-schema.json")
89 TUNABLE_VALUES = path_join(CONFIG_SCHEMA_DIR, "tunables/tunable-values-schema.json")
90
91 @property
92 def schema(self) -> dict:
93 """Gets the schema object for this type."""
94 schema = SCHEMA_STORE[self.value]
95 assert schema
96 return schema
97
98 def validate(self, config: dict) -> None:
99 """
100 Validates the given config against this schema.
101
102 Parameters
103 ----------
104 config : dict
105 The config to validate.
106
107 Raises
108 ------
109 jsonschema.exceptions.ValidationError
110 jsonschema.exceptions.SchemaError
111 """
112 if _SKIP_VALIDATION:
113 _LOG.warning("%s is set - skip schema validation", _VALIDATION_ENV_FLAG)
114 else:
115 resolver: jsonschema.RefResolver = jsonschema.RefResolver.from_schema(self.schema, store=SCHEMA_STORE)
116 jsonschema.validate(instance=config, schema=self.schema, resolver=resolver)
117
[end of mlos_bench/mlos_bench/config/schemas/config_schemas.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mlos_bench/mlos_bench/config/schemas/config_schemas.py b/mlos_bench/mlos_bench/config/schemas/config_schemas.py
--- a/mlos_bench/mlos_bench/config/schemas/config_schemas.py
+++ b/mlos_bench/mlos_bench/config/schemas/config_schemas.py
@@ -14,6 +14,9 @@
import json # schema files are pure json - no comments
import jsonschema
+from referencing import Registry, Resource
+from referencing.jsonschema import DRAFT202012
+
from mlos_bench.util import path_join
_LOG = logging.getLogger(__name__)
@@ -39,6 +42,7 @@
# A class member mapping of schema id to schema object.
_SCHEMA_STORE: Dict[str, dict] = {}
+ _REGISTRY: Registry = Registry()
def __len__(self) -> int:
return self._SCHEMA_STORE.__len__()
@@ -55,6 +59,8 @@
@classmethod
def _load_schemas(cls) -> None:
"""Loads all schemas and subschemas into the schema store for the validator to reference."""
+ if cls._SCHEMA_STORE:
+ return
for root, _, files in walk(CONFIG_SCHEMA_DIR):
for file_name in files:
if not file_name.endswith(".json"):
@@ -70,6 +76,23 @@
assert schema["$id"] not in cls._SCHEMA_STORE
cls._SCHEMA_STORE[schema["$id"]] = schema
+ @classmethod
+ def _load_registry(cls) -> None:
+ """Also store them in a Registry object for referencing by recent versions of jsonschema."""
+ if not cls._SCHEMA_STORE:
+ cls._load_schemas()
+ cls._REGISTRY = Registry().with_resources([
+ (url, Resource.from_contents(schema, default_specification=DRAFT202012))
+ for url, schema in cls._SCHEMA_STORE.items()
+ ])
+
+ @property
+ def registry(self) -> Registry:
+ """Returns a Registry object with all the schemas loaded."""
+ if not self._REGISTRY:
+ self._load_registry()
+ return self._REGISTRY
+
SCHEMA_STORE = SchemaStore()
@@ -112,5 +135,7 @@
if _SKIP_VALIDATION:
_LOG.warning("%s is set - skip schema validation", _VALIDATION_ENV_FLAG)
else:
- resolver: jsonschema.RefResolver = jsonschema.RefResolver.from_schema(self.schema, store=SCHEMA_STORE)
- jsonschema.validate(instance=config, schema=self.schema, resolver=resolver)
+ jsonschema.Draft202012Validator(
+ schema=self.schema,
+ registry=SCHEMA_STORE.registry, # type: ignore[call-arg]
+ ).validate(config)
diff --git a/mlos_bench/setup.py b/mlos_bench/setup.py
--- a/mlos_bench/setup.py
+++ b/mlos_bench/setup.py
@@ -76,7 +76,7 @@
'mlos-core==' + _VERSION,
'requests',
'json5',
- 'jsonschema',
+ 'jsonschema>=4.18.0', 'referencing>=0.29.1',
'importlib_resources;python_version<"3.10"',
] + extra_requires['storage-sql-sqlite'], # NOTE: For now sqlite is a fallback storage backend, so we always install it.
extras_require=extra_requires,
| {"golden_diff": "diff --git a/mlos_bench/mlos_bench/config/schemas/config_schemas.py b/mlos_bench/mlos_bench/config/schemas/config_schemas.py\n--- a/mlos_bench/mlos_bench/config/schemas/config_schemas.py\n+++ b/mlos_bench/mlos_bench/config/schemas/config_schemas.py\n@@ -14,6 +14,9 @@\n import json # schema files are pure json - no comments\n import jsonschema\n \n+from referencing import Registry, Resource\n+from referencing.jsonschema import DRAFT202012\n+\n from mlos_bench.util import path_join\n \n _LOG = logging.getLogger(__name__)\n@@ -39,6 +42,7 @@\n \n # A class member mapping of schema id to schema object.\n _SCHEMA_STORE: Dict[str, dict] = {}\n+ _REGISTRY: Registry = Registry()\n \n def __len__(self) -> int:\n return self._SCHEMA_STORE.__len__()\n@@ -55,6 +59,8 @@\n @classmethod\n def _load_schemas(cls) -> None:\n \"\"\"Loads all schemas and subschemas into the schema store for the validator to reference.\"\"\"\n+ if cls._SCHEMA_STORE:\n+ return\n for root, _, files in walk(CONFIG_SCHEMA_DIR):\n for file_name in files:\n if not file_name.endswith(\".json\"):\n@@ -70,6 +76,23 @@\n assert schema[\"$id\"] not in cls._SCHEMA_STORE\n cls._SCHEMA_STORE[schema[\"$id\"]] = schema\n \n+ @classmethod\n+ def _load_registry(cls) -> None:\n+ \"\"\"Also store them in a Registry object for referencing by recent versions of jsonschema.\"\"\"\n+ if not cls._SCHEMA_STORE:\n+ cls._load_schemas()\n+ cls._REGISTRY = Registry().with_resources([\n+ (url, Resource.from_contents(schema, default_specification=DRAFT202012))\n+ for url, schema in cls._SCHEMA_STORE.items()\n+ ])\n+\n+ @property\n+ def registry(self) -> Registry:\n+ \"\"\"Returns a Registry object with all the schemas loaded.\"\"\"\n+ if not self._REGISTRY:\n+ self._load_registry()\n+ return self._REGISTRY\n+\n \n SCHEMA_STORE = SchemaStore()\n \n@@ -112,5 +135,7 @@\n if _SKIP_VALIDATION:\n _LOG.warning(\"%s is set - skip schema validation\", _VALIDATION_ENV_FLAG)\n else:\n- resolver: jsonschema.RefResolver = jsonschema.RefResolver.from_schema(self.schema, store=SCHEMA_STORE)\n- jsonschema.validate(instance=config, schema=self.schema, resolver=resolver)\n+ jsonschema.Draft202012Validator(\n+ schema=self.schema,\n+ registry=SCHEMA_STORE.registry, # type: ignore[call-arg]\n+ ).validate(config)\ndiff --git a/mlos_bench/setup.py b/mlos_bench/setup.py\n--- a/mlos_bench/setup.py\n+++ b/mlos_bench/setup.py\n@@ -76,7 +76,7 @@\n 'mlos-core==' + _VERSION,\n 'requests',\n 'json5',\n- 'jsonschema',\n+ 'jsonschema>=4.18.0', 'referencing>=0.29.1',\n 'importlib_resources;python_version<\"3.10\"',\n ] + extra_requires['storage-sql-sqlite'], # NOTE: For now sqlite is a fallback storage backend, so we always install it.\n extras_require=extra_requires,\n", "issue": "Move to new version json schema reference resolution\nThe latest version 4.18.0 of jsonschema migrated to a new way of handling schema reference resolution that breaks our current code. 
Will need to adjust it, but for now I've pinned it to the old version in #436 \r\n\r\nSee Also:\r\nhttps://python-jsonschema.readthedocs.io/en/stable/referencing/#migrating-from-refresolver\n", "before_files": [{"content": "#\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n#\n\"\"\"\nSetup instructions for the mlos_bench package.\n\"\"\"\n\nfrom logging import warning\nfrom itertools import chain\nfrom typing import Dict, List\n\nfrom setuptools import setup, find_packages\n\nfrom _version import _VERSION # pylint: disable=import-private-name\n\ntry:\n from setuptools_scm import get_version\n version = get_version(root='..', relative_to=__file__)\n if version is not None:\n _VERSION = version # noqa: F811\nexcept ImportError:\n warning(\"setuptools_scm not found, using version from _version.py\")\nexcept LookupError as e:\n warning(f\"setuptools_scm failed to find git version, using version from _version.py: {e}\")\n\n\nextra_requires: Dict[str, List[str]] = { # pylint: disable=consider-using-namedtuple-or-dataclass\n # Additional tools for extra functionality.\n 'azure': ['azure-storage-file-share'],\n 'storage-sql-duckdb': ['sqlalchemy', 'duckdb_engine'],\n 'storage-sql-mysql': ['sqlalchemy', 'mysql-connector-python'],\n 'storage-sql-postgres': ['sqlalchemy', 'psycopg2'],\n 'storage-sql-sqlite': ['sqlalchemy'], # sqlite3 comes with python, so we don't need to install it.\n # Transitive extra_requires from mlos-core.\n 'flaml': ['flaml[blendsearch]'],\n 'smac': ['smac'],\n}\n\n# construct special 'full' extra that adds requirements for all built-in\n# backend integrations and additional extra features.\nextra_requires['full'] = list(set(chain(*extra_requires.values())))\n\nextra_requires['full-tests'] = extra_requires['full'] + [\n 'pytest',\n 'pytest-forked',\n 'pytest-xdist',\n 'pytest-cov',\n 'pytest-local-badge',\n]\n\n# pylint: disable=duplicate-code\nMODULE_BASE_NAME = 'mlos_bench'\nsetup(\n name='mlos-bench',\n version=_VERSION,\n packages=find_packages(exclude=[f\"{MODULE_BASE_NAME}.tests\", f\"{MODULE_BASE_NAME}.tests.*\"]),\n package_data={\n '': ['py.typed', '**/*.pyi'],\n 'mlos_bench': [\n 'config/**/*.md',\n 'config/**/*.jsonc',\n 'config/**/*.json',\n 'config/**/*.py',\n 'config/**/*.sh',\n 'config/**/*.cmd',\n 'config/**/*.ps1',\n ],\n },\n entry_points={\n 'console_scripts': [\n 'mlos_bench = mlos_bench.run:_main',\n ],\n },\n install_requires=[\n 'mlos-core==' + _VERSION,\n 'requests',\n 'json5',\n 'jsonschema',\n 'importlib_resources;python_version<\"3.10\"',\n ] + extra_requires['storage-sql-sqlite'], # NOTE: For now sqlite is a fallback storage backend, so we always install it.\n extras_require=extra_requires,\n author='Microsoft',\n author_email='[email protected]',\n description=('MLOS Bench Python interface for benchmark automation and optimization.'),\n license='MIT',\n keywords='',\n url='https://aka.ms/mlos-core',\n python_requires='>=3.8',\n)\n", "path": "mlos_bench/setup.py"}, {"content": "#\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n#\n\"\"\"\nA simple class for describing where to find different config schemas and validating configs against them.\n\"\"\"\n\nimport logging\nfrom enum import Enum\nfrom os import path, walk, environ\nfrom typing import Dict, Iterator, Mapping\n\nimport json # schema files are pure json - no comments\nimport jsonschema\n\nfrom mlos_bench.util import path_join\n\n_LOG = logging.getLogger(__name__)\n\n# The path to find all config schemas.\nCONFIG_SCHEMA_DIR = 
path_join(path.dirname(__file__), abs_path=True)\n\n# Allow skipping schema validation for tight dev cycle changes.\n# It is used in `ConfigSchema.validate()` method below.\n# NOTE: this may cause pytest to fail if it's expecting exceptions\n# to be raised for invalid configs.\n_VALIDATION_ENV_FLAG = 'MLOS_BENCH_SKIP_SCHEMA_VALIDATION'\n_SKIP_VALIDATION = (environ.get(_VALIDATION_ENV_FLAG, 'false').lower()\n in {'true', 'y', 'yes', 'on', '1'})\n\n\n# Note: we separate out the SchemaStore from a class method on ConfigSchema\n# because of issues with mypy/pylint and non-Enum-member class members.\nclass SchemaStore(Mapping):\n \"\"\"\n A simple class for storing schemas and subschemas for the validator to reference.\n \"\"\"\n\n # A class member mapping of schema id to schema object.\n _SCHEMA_STORE: Dict[str, dict] = {}\n\n def __len__(self) -> int:\n return self._SCHEMA_STORE.__len__()\n\n def __iter__(self) -> Iterator:\n return self._SCHEMA_STORE.__iter__()\n\n def __getitem__(self, key: str) -> dict:\n \"\"\"Gets the schema object for the given key.\"\"\"\n if not self._SCHEMA_STORE:\n self._load_schemas()\n return self._SCHEMA_STORE[key]\n\n @classmethod\n def _load_schemas(cls) -> None:\n \"\"\"Loads all schemas and subschemas into the schema store for the validator to reference.\"\"\"\n for root, _, files in walk(CONFIG_SCHEMA_DIR):\n for file_name in files:\n if not file_name.endswith(\".json\"):\n continue\n file_path = path_join(root, file_name)\n if path.getsize(file_path) == 0:\n continue\n with open(file_path, mode=\"r\", encoding=\"utf-8\") as schema_file:\n schema = json.load(schema_file)\n cls._SCHEMA_STORE[file_path] = schema\n # Let the schema be referenced by its id as well.\n assert \"$id\" in schema\n assert schema[\"$id\"] not in cls._SCHEMA_STORE\n cls._SCHEMA_STORE[schema[\"$id\"]] = schema\n\n\nSCHEMA_STORE = SchemaStore()\n\n\nclass ConfigSchema(Enum):\n \"\"\"\n An enum to help describe schema types and help validate configs against them.\n \"\"\"\n\n CLI = path_join(CONFIG_SCHEMA_DIR, \"cli/cli-schema.json\")\n GLOBALS = path_join(CONFIG_SCHEMA_DIR, \"cli/globals-schema.json\")\n ENVIRONMENT = path_join(CONFIG_SCHEMA_DIR, \"environments/environment-schema.json\")\n OPTIMIZER = path_join(CONFIG_SCHEMA_DIR, \"optimizers/optimizer-schema.json\")\n SERVICE = path_join(CONFIG_SCHEMA_DIR, \"services/service-schema.json\")\n STORAGE = path_join(CONFIG_SCHEMA_DIR, \"storage/storage-schema.json\")\n TUNABLE_PARAMS = path_join(CONFIG_SCHEMA_DIR, \"tunables/tunable-params-schema.json\")\n TUNABLE_VALUES = path_join(CONFIG_SCHEMA_DIR, \"tunables/tunable-values-schema.json\")\n\n @property\n def schema(self) -> dict:\n \"\"\"Gets the schema object for this type.\"\"\"\n schema = SCHEMA_STORE[self.value]\n assert schema\n return schema\n\n def validate(self, config: dict) -> None:\n \"\"\"\n Validates the given config against this schema.\n\n Parameters\n ----------\n config : dict\n The config to validate.\n\n Raises\n ------\n jsonschema.exceptions.ValidationError\n jsonschema.exceptions.SchemaError\n \"\"\"\n if _SKIP_VALIDATION:\n _LOG.warning(\"%s is set - skip schema validation\", _VALIDATION_ENV_FLAG)\n else:\n resolver: jsonschema.RefResolver = jsonschema.RefResolver.from_schema(self.schema, store=SCHEMA_STORE)\n jsonschema.validate(instance=config, schema=self.schema, resolver=resolver)\n", "path": "mlos_bench/mlos_bench/config/schemas/config_schemas.py"}]} | 2,735 | 784 |
gh_patches_debug_16617 | rasdani/github-patches | git_diff | OCA__bank-payment-900 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[14.0] account_payment_purchase: changing many2one resets payment_mode
Hi,
We've seen that when the field purchase_vendor_bill_id is changed and a purchase is selected from it, the payment_mode_id is always reset because it is using the reference purchase_id.
`new_mode = self.purchase_id.payment_mode_id.id or False`
We've made this change, and it seems to work as it should.
`new_mode = self.purchase_vendor_bill_id.purchase_order_id.payment_mode_id.id or False`
The same goes for the partner_bank_id field.
@MiquelRForgeFlow
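
For illustration, a minimal sketch of the change proposed above (keeping `purchase_id` as a fallback is an assumption, not part of the original report):

```python
# inside AccountMove._onchange_purchase_auto_complete
order = self.purchase_vendor_bill_id.purchase_order_id or self.purchase_id  # fallback assumed
new_mode = order.payment_mode_id.id or False
new_bank = order.supplier_partner_bank_id.id or False
```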
</issue>
<code>
[start of account_payment_purchase/models/account_invoice.py]
1 # Copyright 2016 Akretion (<http://www.akretion.com>).
2 # Copyright 2017 Tecnativa - Vicent Cubells.
3 # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
4
5 from odoo import _, api, models
6
7
8 class AccountMove(models.Model):
9 _inherit = "account.move"
10
11 @api.onchange("purchase_vendor_bill_id", "purchase_id")
12 def _onchange_purchase_auto_complete(self):
13 new_mode = self.purchase_id.payment_mode_id.id or False
14 new_bank = self.purchase_id.supplier_partner_bank_id.id or False
15 res = super()._onchange_purchase_auto_complete() or {}
16 if self.payment_mode_id and new_mode and self.payment_mode_id.id != new_mode:
17 res["warning"] = {
18 "title": _("Warning"),
19 "message": _("Selected purchase order have different payment mode."),
20 }
21 return res
22 self.payment_mode_id = new_mode
23 if self.partner_bank_id and new_bank and self.partner_bank_id.id != new_bank:
24 res["warning"] = {
25 "title": _("Warning"),
26 "message": _("Selected purchase order have different supplier bank."),
27 }
28 return res
29 self.partner_bank_id = new_bank
30 return res
31
[end of account_payment_purchase/models/account_invoice.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/account_payment_purchase/models/account_invoice.py b/account_payment_purchase/models/account_invoice.py
--- a/account_payment_purchase/models/account_invoice.py
+++ b/account_payment_purchase/models/account_invoice.py
@@ -10,8 +10,16 @@
@api.onchange("purchase_vendor_bill_id", "purchase_id")
def _onchange_purchase_auto_complete(self):
- new_mode = self.purchase_id.payment_mode_id.id or False
- new_bank = self.purchase_id.supplier_partner_bank_id.id or False
+
+ new_mode = (
+ self.purchase_vendor_bill_id.purchase_order_id.payment_mode_id.id
+ or self.purchase_id.payment_mode_id.id
+ )
+ new_bank = (
+ self.purchase_vendor_bill_id.purchase_order_id.supplier_partner_bank_id.id
+ or self.purchase_id.supplier_partner_bank_id.id
+ )
+
res = super()._onchange_purchase_auto_complete() or {}
if self.payment_mode_id and new_mode and self.payment_mode_id.id != new_mode:
res["warning"] = {
| {"golden_diff": "diff --git a/account_payment_purchase/models/account_invoice.py b/account_payment_purchase/models/account_invoice.py\n--- a/account_payment_purchase/models/account_invoice.py\n+++ b/account_payment_purchase/models/account_invoice.py\n@@ -10,8 +10,16 @@\n \n @api.onchange(\"purchase_vendor_bill_id\", \"purchase_id\")\n def _onchange_purchase_auto_complete(self):\n- new_mode = self.purchase_id.payment_mode_id.id or False\n- new_bank = self.purchase_id.supplier_partner_bank_id.id or False\n+\n+ new_mode = (\n+ self.purchase_vendor_bill_id.purchase_order_id.payment_mode_id.id\n+ or self.purchase_id.payment_mode_id.id\n+ )\n+ new_bank = (\n+ self.purchase_vendor_bill_id.purchase_order_id.supplier_partner_bank_id.id\n+ or self.purchase_id.supplier_partner_bank_id.id\n+ )\n+\n res = super()._onchange_purchase_auto_complete() or {}\n if self.payment_mode_id and new_mode and self.payment_mode_id.id != new_mode:\n res[\"warning\"] = {\n", "issue": "[14.0] account_payment_purchase: changing many2one resets payment_mode\nHi,\r\nWe've seen that when the field purchase_vendor_bill_id is changed and a purchase is selected from it, the payment_mode_id is always reseted because it is using the reference purchase_id.\r\n`new_mode = self.purchase_id.payment_mode_id.id or False`\r\nWe've made this change, and it seems to work as it should.\r\n`new_mode = self.purchase_vendor_bill_id.purchase_order_id.payment_mode_id.id or False`\r\nThe same goes for the partner_bank_id field.\r\n@MiquelRForgeFlow \n", "before_files": [{"content": "# Copyright 2016 Akretion (<http://www.akretion.com>).\n# Copyright 2017 Tecnativa - Vicent Cubells.\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).\n\nfrom odoo import _, api, models\n\n\nclass AccountMove(models.Model):\n _inherit = \"account.move\"\n\n @api.onchange(\"purchase_vendor_bill_id\", \"purchase_id\")\n def _onchange_purchase_auto_complete(self):\n new_mode = self.purchase_id.payment_mode_id.id or False\n new_bank = self.purchase_id.supplier_partner_bank_id.id or False\n res = super()._onchange_purchase_auto_complete() or {}\n if self.payment_mode_id and new_mode and self.payment_mode_id.id != new_mode:\n res[\"warning\"] = {\n \"title\": _(\"Warning\"),\n \"message\": _(\"Selected purchase order have different payment mode.\"),\n }\n return res\n self.payment_mode_id = new_mode\n if self.partner_bank_id and new_bank and self.partner_bank_id.id != new_bank:\n res[\"warning\"] = {\n \"title\": _(\"Warning\"),\n \"message\": _(\"Selected purchase order have different supplier bank.\"),\n }\n return res\n self.partner_bank_id = new_bank\n return res\n", "path": "account_payment_purchase/models/account_invoice.py"}]} | 993 | 226 |
gh_patches_debug_21021 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-593 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make creator a readonly field in django admin
With a large user base the dropdown used for the user selection in django admin becomes unresponsive. As there is no apparent reason to change the creator of an object (comment, item, rate, poll, ...) the creator field should be made read_only.
The same problem occurs for every model where a user is set as a foreign key (Action: actor, Projects: member/moderator, Organisation: initiator)
The readonly property can either be set on every Admin class by setting ` readonly_fields = ('creator',)` individually or by using a custom `A4Admin(admin.ModelAdmin)` which has to be set as the parent for every Admin class used.
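
For illustration, a minimal sketch of the shared-base-class option (the `A4Admin` name comes from the issue; registering it for `OfflineEvent` is just an example):

```python
from django.contrib import admin

from . import models


class A4Admin(admin.ModelAdmin):
    # Creator stays visible but is no longer editable, so Django does not
    # render the huge user dropdown for it.
    readonly_fields = ('creator',)


@admin.register(models.OfflineEvent)
class OfflineEventAdmin(A4Admin):
    pass
```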
</issue>
<code>
[start of apps/polls/admin.py]
1 from django.contrib import admin
2
3 from . import models
4
5
6 class ChoiceInline(admin.TabularInline):
7 model = models.Choice
8
9
10 class QuestionAdmin(admin.ModelAdmin):
11 inlines = [
12 ChoiceInline
13 ]
14
15
16 class VoteAdmin(admin.ModelAdmin):
17 list_filter = ('choice__question',)
18
19
20 admin.site.register(models.Question, QuestionAdmin)
21 admin.site.register(models.Vote, VoteAdmin)
22
[end of apps/polls/admin.py]
[start of apps/documents/admin.py]
1 from django.contrib import admin
2
3 from . import models
4
5
6 class ParagraphAdmin(admin.ModelAdmin):
7 list_filter = ('chapter',)
8
9
10 admin.site.register(models.Chapter)
11 admin.site.register(models.Paragraph, ParagraphAdmin)
12
[end of apps/documents/admin.py]
[start of apps/offlineevents/admin.py]
1 from django.contrib import admin
2
3 from . import models
4
5 admin.site.register(models.OfflineEvent, admin.ModelAdmin)
6
[end of apps/offlineevents/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/apps/documents/admin.py b/apps/documents/admin.py
--- a/apps/documents/admin.py
+++ b/apps/documents/admin.py
@@ -3,9 +3,12 @@
from . import models
[email protected](models.Paragraph)
class ParagraphAdmin(admin.ModelAdmin):
list_filter = ('chapter',)
+ readonly_fields = ('creator',)
-admin.site.register(models.Chapter)
-admin.site.register(models.Paragraph, ParagraphAdmin)
[email protected](models.Chapter)
+class ChapterAdmin(admin.ModelAdmin):
+ readonly_fields = ('creator', )
diff --git a/apps/offlineevents/admin.py b/apps/offlineevents/admin.py
--- a/apps/offlineevents/admin.py
+++ b/apps/offlineevents/admin.py
@@ -2,4 +2,7 @@
from . import models
-admin.site.register(models.OfflineEvent, admin.ModelAdmin)
+
[email protected](models.OfflineEvent)
+class OfflineEventAdmin(admin.ModelAdmin):
+ readonly_fields = ('creator', )
diff --git a/apps/polls/admin.py b/apps/polls/admin.py
--- a/apps/polls/admin.py
+++ b/apps/polls/admin.py
@@ -7,15 +7,8 @@
model = models.Choice
[email protected](models.Question)
class QuestionAdmin(admin.ModelAdmin):
inlines = [
ChoiceInline
]
-
-
-class VoteAdmin(admin.ModelAdmin):
- list_filter = ('choice__question',)
-
-
-admin.site.register(models.Question, QuestionAdmin)
-admin.site.register(models.Vote, VoteAdmin)
| {"golden_diff": "diff --git a/apps/documents/admin.py b/apps/documents/admin.py\n--- a/apps/documents/admin.py\n+++ b/apps/documents/admin.py\n@@ -3,9 +3,12 @@\n from . import models\n \n \[email protected](models.Paragraph)\n class ParagraphAdmin(admin.ModelAdmin):\n list_filter = ('chapter',)\n+ readonly_fields = ('creator',)\n \n \n-admin.site.register(models.Chapter)\n-admin.site.register(models.Paragraph, ParagraphAdmin)\[email protected](models.Chapter)\n+class ChapterAdmin(admin.ModelAdmin):\n+ readonly_fields = ('creator', )\ndiff --git a/apps/offlineevents/admin.py b/apps/offlineevents/admin.py\n--- a/apps/offlineevents/admin.py\n+++ b/apps/offlineevents/admin.py\n@@ -2,4 +2,7 @@\n \n from . import models\n \n-admin.site.register(models.OfflineEvent, admin.ModelAdmin)\n+\[email protected](models.OfflineEvent)\n+class OfflineEventAdmin(admin.ModelAdmin):\n+ readonly_fields = ('creator', )\ndiff --git a/apps/polls/admin.py b/apps/polls/admin.py\n--- a/apps/polls/admin.py\n+++ b/apps/polls/admin.py\n@@ -7,15 +7,8 @@\n model = models.Choice\n \n \[email protected](models.Question)\n class QuestionAdmin(admin.ModelAdmin):\n inlines = [\n ChoiceInline\n ]\n-\n-\n-class VoteAdmin(admin.ModelAdmin):\n- list_filter = ('choice__question',)\n-\n-\n-admin.site.register(models.Question, QuestionAdmin)\n-admin.site.register(models.Vote, VoteAdmin)\n", "issue": "Make creator a readonly field in django admin\nWith a large user base the dropdown used for the user selection in django admin becomes unresponsive. As there is no apparent reason to change the creator of an object (comment, item, rate, poll, ...) the creator field should be made read_only.\r\n\r\nThe same problem occurs for every model where a user is set as a foreign key (Action: actor, Projects: member/moderator, Organisation: initiator)\r\n\r\nThe readonly property can either be set on every Admin class by setting ` readonly_fields = ('creator',)` individually or by using a custom `A4Admin(admin.ModelAdmin)` which has to be set as the parent for every Admin class used.\n", "before_files": [{"content": "from django.contrib import admin\n\nfrom . import models\n\n\nclass ChoiceInline(admin.TabularInline):\n model = models.Choice\n\n\nclass QuestionAdmin(admin.ModelAdmin):\n inlines = [\n ChoiceInline\n ]\n\n\nclass VoteAdmin(admin.ModelAdmin):\n list_filter = ('choice__question',)\n\n\nadmin.site.register(models.Question, QuestionAdmin)\nadmin.site.register(models.Vote, VoteAdmin)\n", "path": "apps/polls/admin.py"}, {"content": "from django.contrib import admin\n\nfrom . import models\n\n\nclass ParagraphAdmin(admin.ModelAdmin):\n list_filter = ('chapter',)\n\n\nadmin.site.register(models.Chapter)\nadmin.site.register(models.Paragraph, ParagraphAdmin)\n", "path": "apps/documents/admin.py"}, {"content": "from django.contrib import admin\n\nfrom . import models\n\nadmin.site.register(models.OfflineEvent, admin.ModelAdmin)\n", "path": "apps/offlineevents/admin.py"}]} | 920 | 320 |
gh_patches_debug_38709 | rasdani/github-patches | git_diff | pypa__pipenv-3322 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Incompatibility with pypy3
### Issue description
Recent versions of pipenv no longer work with pypy3.
### Expected result
Expected pipenv to work.
### Actual result
```
❯ PYENV_VERSION=pypy3.5-6.0.0 pyenv exec pipenv --verbose
Traceback (most recent call last):
File "/Users/ob/.pyenv/versions/pypy3.5-6.0.0/bin/pipenv", line 7, in <module>
from pipenv import cli
File "/Users/ob/.pyenv/versions/pypy3.5-6.0.0/site-packages/pipenv/__init__.py", line 47, in <module>
from .cli import cli
File "/Users/ob/.pyenv/versions/pypy3.5-6.0.0/site-packages/pipenv/cli/__init__.py", line 3, in <module>
from .command import cli
File "/Users/ob/.pyenv/versions/pypy3.5-6.0.0/site-packages/pipenv/cli/command.py", line 19, in <module>
from .options import (
File "/Users/ob/.pyenv/versions/pypy3.5-6.0.0/site-packages/pipenv/cli/options.py", line 11, in <module>
from .. import environments
File "/Users/ob/.pyenv/versions/pypy3.5-6.0.0/site-packages/pipenv/environments.py", line 7, in <module>
from ._compat import fix_utf8
File "/Users/ob/.pyenv/versions/pypy3.5-6.0.0/site-packages/pipenv/_compat.py", line 98, in <module>
OUT_ENCODING, ERR_ENCODING = force_encoding()
File "/Users/ob/.pyenv/versions/pypy3.5-6.0.0/site-packages/pipenv/_compat.py", line 68, in force_encoding
from ctypes import pythonapi, py_object, c_char_p
ImportError: cannot import name 'pythonapi'
```
### Steps to replicate
Simply install pipenv with PyPy3, then try to run any pipenv command.
-------------------------------------------------------------------------------
`pipenv --support` won't work either.
PyPy doesn't support `ctypes.pythonapi`, but the root cause of the issue is that it shouldn't even try to import this module in the first place. I've investigated a bit and it looks like with PyPy3, `sys.stdout.encoding` returns `"utf8"` instead of `"utf-8"`. I'm unsure whether this is a PyPy bug or not, but since pipenv used to work fine with PyPy3 prior to (I think) #3096, can we relax the condition on [this line](https://github.com/pypa/pipenv/blob/b03983e61ef284157c725dfedef02fef5829341f/pipenv/_compat.py#L64) to allow for `"utf8"` as well as `"utf-8"`?
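Concretely, the relaxation I have in mind would just accept both spellings. A quick REPL-style illustration (the `"utf8"` value is what `sys.stdout.encoding` apparently reports under PyPy3, as described above):

```python
reported = "utf8"  # spelling reported by PyPy3's sys.stdout.encoding

# current check: treats "utf8" as "not UTF-8" and takes the ctypes path
print(reported.lower() != "utf-8")                # True

# relaxed check: accept either spelling and skip the ctypes machinery
print(reported.lower() not in ("utf-8", "utf8"))  # False
```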
</issue>
<code>
[start of pipenv/_compat.py]
1 # -*- coding=utf-8 -*-
2 """A compatibility module for pipenv's backports and manipulations.
3
4 Exposes a standard API that enables compatibility across python versions,
5 operating systems, etc.
6 """
7
8 import functools
9 import importlib
10 import io
11 import os
12 import six
13 import sys
14 import warnings
15 import vistir
16 from .vendor.vistir.compat import NamedTemporaryFile, Path, ResourceWarning, TemporaryDirectory
17
18 # Backport required for earlier versions of Python.
19 if sys.version_info < (3, 3):
20 from .vendor.backports.shutil_get_terminal_size import get_terminal_size
21 else:
22 from shutil import get_terminal_size
23
24 warnings.filterwarnings("ignore", category=ResourceWarning)
25
26
27 __all__ = [
28 "NamedTemporaryFile", "Path", "ResourceWarning", "TemporaryDirectory",
29 "get_terminal_size", "getpreferredencoding", "DEFAULT_ENCODING", "force_encoding",
30 "UNICODE_TO_ASCII_TRANSLATION_MAP", "decode_output", "fix_utf8"
31 ]
32
33
34 def getpreferredencoding():
35 import locale
36 # Borrowed from Invoke
37 # (see https://github.com/pyinvoke/invoke/blob/93af29d/invoke/runners.py#L881)
38 _encoding = locale.getpreferredencoding(False)
39 if six.PY2 and not sys.platform == "win32":
40 _default_encoding = locale.getdefaultlocale()[1]
41 if _default_encoding is not None:
42 _encoding = _default_encoding
43 return _encoding
44
45
46 DEFAULT_ENCODING = getpreferredencoding()
47
48
49 # From https://github.com/CarlFK/veyepar/blob/5c5de47/dj/scripts/fixunicode.py
50 # MIT LIcensed, thanks Carl!
51 def force_encoding():
52 try:
53 stdout_isatty = sys.stdout.isatty
54 stderr_isatty = sys.stderr.isatty
55 except AttributeError:
56 return DEFAULT_ENCODING, DEFAULT_ENCODING
57 else:
58 if not (stdout_isatty() and stderr_isatty()):
59 return DEFAULT_ENCODING, DEFAULT_ENCODING
60 stdout_encoding = sys.stdout.encoding
61 stderr_encoding = sys.stderr.encoding
62 if sys.platform == "win32" and sys.version_info >= (3, 1):
63 return DEFAULT_ENCODING, DEFAULT_ENCODING
64 if stdout_encoding.lower() != "utf-8" or stderr_encoding.lower() != "utf-8":
65
66 from ctypes import pythonapi, py_object, c_char_p
67 try:
68 PyFile_SetEncoding = pythonapi.PyFile_SetEncoding
69 except AttributeError:
70 return DEFAULT_ENCODING, DEFAULT_ENCODING
71 else:
72 PyFile_SetEncoding.argtypes = (py_object, c_char_p)
73 if stdout_encoding.lower() != "utf-8":
74 try:
75 was_set = PyFile_SetEncoding(sys.stdout, "utf-8")
76 except OSError:
77 was_set = False
78 if not was_set:
79 stdout_encoding = DEFAULT_ENCODING
80 else:
81 stdout_encoding = "utf-8"
82
83 if stderr_encoding.lower() != "utf-8":
84 try:
85 was_set = PyFile_SetEncoding(sys.stderr, "utf-8")
86 except OSError:
87 was_set = False
88 if not was_set:
89 stderr_encoding = DEFAULT_ENCODING
90 else:
91 stderr_encoding = "utf-8"
92
93 return stdout_encoding, stderr_encoding
94
95
96 OUT_ENCODING, ERR_ENCODING = force_encoding()
97
98
99 UNICODE_TO_ASCII_TRANSLATION_MAP = {
100 8230: u"...",
101 8211: u"-",
102 10004: u"OK",
103 10008: u"x",
104 }
105
106
107 def decode_output(output):
108 if not isinstance(output, six.string_types):
109 return output
110 try:
111 output = output.encode(DEFAULT_ENCODING)
112 except (AttributeError, UnicodeDecodeError, UnicodeEncodeError):
113 if six.PY2:
114 output = unicode.translate(vistir.misc.to_text(output),
115 UNICODE_TO_ASCII_TRANSLATION_MAP)
116 else:
117 output = output.translate(UNICODE_TO_ASCII_TRANSLATION_MAP)
118 output = output.encode(DEFAULT_ENCODING, "replace")
119 return vistir.misc.to_text(output, encoding=DEFAULT_ENCODING, errors="replace")
120
121
122 def fix_utf8(text):
123 if not isinstance(text, six.string_types):
124 return text
125 try:
126 text = decode_output(text)
127 except UnicodeDecodeError:
128 if six.PY2:
129 text = unicode.translate(vistir.misc.to_text(text), UNICODE_TO_ASCII_TRANSLATION_MAP)
130 return text
131
[end of pipenv/_compat.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pipenv/_compat.py b/pipenv/_compat.py
--- a/pipenv/_compat.py
+++ b/pipenv/_compat.py
@@ -26,8 +26,8 @@
__all__ = [
"NamedTemporaryFile", "Path", "ResourceWarning", "TemporaryDirectory",
- "get_terminal_size", "getpreferredencoding", "DEFAULT_ENCODING", "force_encoding",
- "UNICODE_TO_ASCII_TRANSLATION_MAP", "decode_output", "fix_utf8"
+ "get_terminal_size", "getpreferredencoding", "DEFAULT_ENCODING", "canonical_encoding_name",
+ "force_encoding", "UNICODE_TO_ASCII_TRANSLATION_MAP", "decode_output", "fix_utf8"
]
@@ -46,6 +46,16 @@
DEFAULT_ENCODING = getpreferredencoding()
+def canonical_encoding_name(name):
+ import codecs
+ try:
+ codec = codecs.lookup(name)
+ except LookupError:
+ return name
+ else:
+ return codec.name
+
+
# From https://github.com/CarlFK/veyepar/blob/5c5de47/dj/scripts/fixunicode.py
# MIT LIcensed, thanks Carl!
def force_encoding():
@@ -57,20 +67,23 @@
else:
if not (stdout_isatty() and stderr_isatty()):
return DEFAULT_ENCODING, DEFAULT_ENCODING
- stdout_encoding = sys.stdout.encoding
- stderr_encoding = sys.stderr.encoding
+ stdout_encoding = canonical_encoding_name(sys.stdout.encoding)
+ stderr_encoding = canonical_encoding_name(sys.stderr.encoding)
if sys.platform == "win32" and sys.version_info >= (3, 1):
return DEFAULT_ENCODING, DEFAULT_ENCODING
- if stdout_encoding.lower() != "utf-8" or stderr_encoding.lower() != "utf-8":
+ if stdout_encoding != "utf-8" or stderr_encoding != "utf-8":
- from ctypes import pythonapi, py_object, c_char_p
+ try:
+ from ctypes import pythonapi, py_object, c_char_p
+ except ImportError:
+ return DEFAULT_ENCODING, DEFAULT_ENCODING
try:
PyFile_SetEncoding = pythonapi.PyFile_SetEncoding
except AttributeError:
return DEFAULT_ENCODING, DEFAULT_ENCODING
else:
PyFile_SetEncoding.argtypes = (py_object, c_char_p)
- if stdout_encoding.lower() != "utf-8":
+ if stdout_encoding != "utf-8":
try:
was_set = PyFile_SetEncoding(sys.stdout, "utf-8")
except OSError:
@@ -80,7 +93,7 @@
else:
stdout_encoding = "utf-8"
- if stderr_encoding.lower() != "utf-8":
+ if stderr_encoding != "utf-8":
try:
was_set = PyFile_SetEncoding(sys.stderr, "utf-8")
except OSError:
| {"golden_diff": "diff --git a/pipenv/_compat.py b/pipenv/_compat.py\n--- a/pipenv/_compat.py\n+++ b/pipenv/_compat.py\n@@ -26,8 +26,8 @@\n \n __all__ = [\n \"NamedTemporaryFile\", \"Path\", \"ResourceWarning\", \"TemporaryDirectory\",\n- \"get_terminal_size\", \"getpreferredencoding\", \"DEFAULT_ENCODING\", \"force_encoding\",\n- \"UNICODE_TO_ASCII_TRANSLATION_MAP\", \"decode_output\", \"fix_utf8\"\n+ \"get_terminal_size\", \"getpreferredencoding\", \"DEFAULT_ENCODING\", \"canonical_encoding_name\",\n+ \"force_encoding\", \"UNICODE_TO_ASCII_TRANSLATION_MAP\", \"decode_output\", \"fix_utf8\"\n ]\n \n \n@@ -46,6 +46,16 @@\n DEFAULT_ENCODING = getpreferredencoding()\n \n \n+def canonical_encoding_name(name):\n+ import codecs\n+ try:\n+ codec = codecs.lookup(name)\n+ except LookupError:\n+ return name\n+ else:\n+ return codec.name\n+\n+\n # From https://github.com/CarlFK/veyepar/blob/5c5de47/dj/scripts/fixunicode.py\n # MIT LIcensed, thanks Carl!\n def force_encoding():\n@@ -57,20 +67,23 @@\n else:\n if not (stdout_isatty() and stderr_isatty()):\n return DEFAULT_ENCODING, DEFAULT_ENCODING\n- stdout_encoding = sys.stdout.encoding\n- stderr_encoding = sys.stderr.encoding\n+ stdout_encoding = canonical_encoding_name(sys.stdout.encoding)\n+ stderr_encoding = canonical_encoding_name(sys.stderr.encoding)\n if sys.platform == \"win32\" and sys.version_info >= (3, 1):\n return DEFAULT_ENCODING, DEFAULT_ENCODING\n- if stdout_encoding.lower() != \"utf-8\" or stderr_encoding.lower() != \"utf-8\":\n+ if stdout_encoding != \"utf-8\" or stderr_encoding != \"utf-8\":\n \n- from ctypes import pythonapi, py_object, c_char_p\n+ try:\n+ from ctypes import pythonapi, py_object, c_char_p\n+ except ImportError:\n+ return DEFAULT_ENCODING, DEFAULT_ENCODING\n try:\n PyFile_SetEncoding = pythonapi.PyFile_SetEncoding\n except AttributeError:\n return DEFAULT_ENCODING, DEFAULT_ENCODING\n else:\n PyFile_SetEncoding.argtypes = (py_object, c_char_p)\n- if stdout_encoding.lower() != \"utf-8\":\n+ if stdout_encoding != \"utf-8\":\n try:\n was_set = PyFile_SetEncoding(sys.stdout, \"utf-8\")\n except OSError:\n@@ -80,7 +93,7 @@\n else:\n stdout_encoding = \"utf-8\"\n \n- if stderr_encoding.lower() != \"utf-8\":\n+ if stderr_encoding != \"utf-8\":\n try:\n was_set = PyFile_SetEncoding(sys.stderr, \"utf-8\")\n except OSError:\n", "issue": "Incompatibility with pypy3\n\r\n\r\n### Issue description\r\n\r\nRecent versions of pipenv no longer work with pypy3.\r\n\r\n\r\n### Expected result\r\n\r\nExpected pipenv to work.\r\n\r\n### Actual result\r\n\r\n```\r\n\u276f PYENV_VERSION=pypy3.5-6.0.0 pyenv exec pipenv --verbose\r\nTraceback (most recent call last):\r\n File \"/Users/ob/.pyenv/versions/pypy3.5-6.0.0/bin/pipenv\", line 7, in <module>\r\n from pipenv import cli\r\n File \"/Users/ob/.pyenv/versions/pypy3.5-6.0.0/site-packages/pipenv/__init__.py\", line 47, in <module>\r\n from .cli import cli\r\n File \"/Users/ob/.pyenv/versions/pypy3.5-6.0.0/site-packages/pipenv/cli/__init__.py\", line 3, in <module>\r\n from .command import cli\r\n File \"/Users/ob/.pyenv/versions/pypy3.5-6.0.0/site-packages/pipenv/cli/command.py\", line 19, in <module>\r\n from .options import (\r\n File \"/Users/ob/.pyenv/versions/pypy3.5-6.0.0/site-packages/pipenv/cli/options.py\", line 11, in <module>\r\n from .. 
import environments\r\n File \"/Users/ob/.pyenv/versions/pypy3.5-6.0.0/site-packages/pipenv/environments.py\", line 7, in <module>\r\n from ._compat import fix_utf8\r\n File \"/Users/ob/.pyenv/versions/pypy3.5-6.0.0/site-packages/pipenv/_compat.py\", line 98, in <module>\r\n OUT_ENCODING, ERR_ENCODING = force_encoding()\r\n File \"/Users/ob/.pyenv/versions/pypy3.5-6.0.0/site-packages/pipenv/_compat.py\", line 68, in force_encoding\r\n from ctypes import pythonapi, py_object, c_char_p\r\nImportError: cannot import name 'pythonapi'\r\n```\r\n\r\n\r\n### Steps to replicate\r\n\r\nSimply install pipenv with PyPy3, then try to run any pipenv command.\r\n\r\n-------------------------------------------------------------------------------\r\n\r\n`pipenv --support` won't work either.\r\n\r\nPyPy doesn't support `ctypes.pythonapi`, but the root cause of the issue is that it shouldn't even try to import this module in the first place. I've investigated a bit and it looks with PyPy3, `sys.stdout.encoding` returns `\"utf8\"` instead of `\"utf-8\"`. I'm unsure whether this is a PyPy bug or not, but since pipenv used to work fine with PyPy3 prior to (I think) #3096, can we relax the condition on [this line](https://github.com/pypa/pipenv/blob/b03983e61ef284157c725dfedef02fef5829341f/pipenv/_compat.py#L64) to allow for `\"utf8\"` as well as `\"utf-8\"`?\r\n\r\n\n", "before_files": [{"content": "# -*- coding=utf-8 -*-\n\"\"\"A compatibility module for pipenv's backports and manipulations.\n\nExposes a standard API that enables compatibility across python versions,\noperating systems, etc.\n\"\"\"\n\nimport functools\nimport importlib\nimport io\nimport os\nimport six\nimport sys\nimport warnings\nimport vistir\nfrom .vendor.vistir.compat import NamedTemporaryFile, Path, ResourceWarning, TemporaryDirectory\n\n# Backport required for earlier versions of Python.\nif sys.version_info < (3, 3):\n from .vendor.backports.shutil_get_terminal_size import get_terminal_size\nelse:\n from shutil import get_terminal_size\n\nwarnings.filterwarnings(\"ignore\", category=ResourceWarning)\n\n\n__all__ = [\n \"NamedTemporaryFile\", \"Path\", \"ResourceWarning\", \"TemporaryDirectory\",\n \"get_terminal_size\", \"getpreferredencoding\", \"DEFAULT_ENCODING\", \"force_encoding\",\n \"UNICODE_TO_ASCII_TRANSLATION_MAP\", \"decode_output\", \"fix_utf8\"\n]\n\n\ndef getpreferredencoding():\n import locale\n # Borrowed from Invoke\n # (see https://github.com/pyinvoke/invoke/blob/93af29d/invoke/runners.py#L881)\n _encoding = locale.getpreferredencoding(False)\n if six.PY2 and not sys.platform == \"win32\":\n _default_encoding = locale.getdefaultlocale()[1]\n if _default_encoding is not None:\n _encoding = _default_encoding\n return _encoding\n\n\nDEFAULT_ENCODING = getpreferredencoding()\n\n\n# From https://github.com/CarlFK/veyepar/blob/5c5de47/dj/scripts/fixunicode.py\n# MIT LIcensed, thanks Carl!\ndef force_encoding():\n try:\n stdout_isatty = sys.stdout.isatty\n stderr_isatty = sys.stderr.isatty\n except AttributeError:\n return DEFAULT_ENCODING, DEFAULT_ENCODING\n else:\n if not (stdout_isatty() and stderr_isatty()):\n return DEFAULT_ENCODING, DEFAULT_ENCODING\n stdout_encoding = sys.stdout.encoding\n stderr_encoding = sys.stderr.encoding\n if sys.platform == \"win32\" and sys.version_info >= (3, 1):\n return DEFAULT_ENCODING, DEFAULT_ENCODING\n if stdout_encoding.lower() != \"utf-8\" or stderr_encoding.lower() != \"utf-8\":\n\n from ctypes import pythonapi, py_object, c_char_p\n try:\n PyFile_SetEncoding = 
pythonapi.PyFile_SetEncoding\n except AttributeError:\n return DEFAULT_ENCODING, DEFAULT_ENCODING\n else:\n PyFile_SetEncoding.argtypes = (py_object, c_char_p)\n if stdout_encoding.lower() != \"utf-8\":\n try:\n was_set = PyFile_SetEncoding(sys.stdout, \"utf-8\")\n except OSError:\n was_set = False\n if not was_set:\n stdout_encoding = DEFAULT_ENCODING\n else:\n stdout_encoding = \"utf-8\"\n\n if stderr_encoding.lower() != \"utf-8\":\n try:\n was_set = PyFile_SetEncoding(sys.stderr, \"utf-8\")\n except OSError:\n was_set = False\n if not was_set:\n stderr_encoding = DEFAULT_ENCODING\n else:\n stderr_encoding = \"utf-8\"\n\n return stdout_encoding, stderr_encoding\n\n\nOUT_ENCODING, ERR_ENCODING = force_encoding()\n\n\nUNICODE_TO_ASCII_TRANSLATION_MAP = {\n 8230: u\"...\",\n 8211: u\"-\",\n 10004: u\"OK\",\n 10008: u\"x\",\n}\n\n\ndef decode_output(output):\n if not isinstance(output, six.string_types):\n return output\n try:\n output = output.encode(DEFAULT_ENCODING)\n except (AttributeError, UnicodeDecodeError, UnicodeEncodeError):\n if six.PY2:\n output = unicode.translate(vistir.misc.to_text(output),\n UNICODE_TO_ASCII_TRANSLATION_MAP)\n else:\n output = output.translate(UNICODE_TO_ASCII_TRANSLATION_MAP)\n output = output.encode(DEFAULT_ENCODING, \"replace\")\n return vistir.misc.to_text(output, encoding=DEFAULT_ENCODING, errors=\"replace\")\n\n\ndef fix_utf8(text):\n if not isinstance(text, six.string_types):\n return text\n try:\n text = decode_output(text)\n except UnicodeDecodeError:\n if six.PY2:\n text = unicode.translate(vistir.misc.to_text(text), UNICODE_TO_ASCII_TRANSLATION_MAP)\n return text\n", "path": "pipenv/_compat.py"}]} | 2,469 | 636 |
gh_patches_debug_24554 | rasdani/github-patches | git_diff | litestar-org__litestar-174 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Move `requests` to `testing` extra
I was inspecting starlite dependencies and was confused to see `requests` being required without any obvious reason.

I found #13 then, but I disagree with the resolution since I believe libs required for testing purposes should not be installed for normal use. Right now it is only one lib (with several dependencies), but imagine if more dependencies were added for testing.
#### What I propose:
1. Move requests from required dependencies to a `testing` extra (`pip install starlite[testing]`)
2. Remove the import of `starlite.testing` from the `starlite` package
3. When `starlite.testing` is imported explicitly (`from starlite import testing`), check whether requests is installed; if not, raise `RuntimeError("To access starlite.testing install starlite with [testing] extra")` (rough sketch below)
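For illustration only, the guard in point 3 could look roughly like this (just a sketch of the idea; the exact module layout and wording are up for discussion):

```python
# starlite/testing.py (hypothetical guard at the top of the module)
try:
    import requests  # noqa: F401  # only needed for the test client
except ImportError as exc:
    raise RuntimeError(
        "To access starlite.testing install starlite with [testing] extra"
    ) from exc
```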
This is how the end user's `pyproject.toml` would look:
```toml
[tool.poetry.dependencies]
python = "^3.10"
starlite = "^1.3.9"
[tool.poetry.dev-dependencies]
starlite = {extras = ["testing"], version = "*"} # whatever version is installed + testing dependencies
pytest = "^5.2"
```
I can send a PR if changes are welcomed.
</issue>
<code>
[start of starlite/__init__.py]
1 from starlite.datastructures import File, Redirect, State, Stream, Template
2
3 from .app import Starlite
4 from .config import (
5 CacheConfig,
6 CORSConfig,
7 OpenAPIConfig,
8 StaticFilesConfig,
9 TemplateConfig,
10 )
11 from .connection import Request, WebSocket
12 from .controller import Controller
13 from .dto import DTOFactory
14 from .enums import (
15 HttpMethod,
16 MediaType,
17 OpenAPIMediaType,
18 RequestEncodingType,
19 ScopeType,
20 )
21 from .exceptions import (
22 HTTPException,
23 ImproperlyConfiguredException,
24 InternalServerException,
25 MissingDependencyException,
26 NotAuthorizedException,
27 NotFoundException,
28 PermissionDeniedException,
29 ServiceUnavailableException,
30 StarLiteException,
31 ValidationException,
32 )
33 from .handlers import (
34 ASGIRouteHandler,
35 BaseRouteHandler,
36 HTTPRouteHandler,
37 WebsocketRouteHandler,
38 asgi,
39 delete,
40 get,
41 patch,
42 post,
43 put,
44 route,
45 websocket,
46 )
47 from .logging import LoggingConfig, QueueListenerHandler
48 from .middleware import AbstractAuthenticationMiddleware, AuthenticationResult
49 from .openapi.controller import OpenAPIController
50 from .params import Body, Dependency, Parameter
51 from .plugins import PluginProtocol
52 from .provide import Provide
53 from .response import Response
54 from .router import Router
55 from .routes import BaseRoute, HTTPRoute, WebSocketRoute
56 from .testing import TestClient, create_test_client, create_test_request
57 from .types import MiddlewareProtocol, Partial, ResponseHeader
58
59 __all__ = [
60 "ASGIRouteHandler",
61 "AbstractAuthenticationMiddleware",
62 "AuthenticationResult",
63 "BaseRoute",
64 "BaseRouteHandler",
65 "Body",
66 "CORSConfig",
67 "CacheConfig",
68 "Controller",
69 "Dependency",
70 "DTOFactory",
71 "File",
72 "HTTPException",
73 "HTTPRoute",
74 "HTTPRouteHandler",
75 "HttpMethod",
76 "ImproperlyConfiguredException",
77 "InternalServerException",
78 "LoggingConfig",
79 "MediaType",
80 "MiddlewareProtocol",
81 "MissingDependencyException",
82 "NotAuthorizedException",
83 "NotFoundException",
84 "OpenAPIConfig",
85 "OpenAPIController",
86 "OpenAPIMediaType",
87 "Parameter",
88 "Partial",
89 "PermissionDeniedException",
90 "PluginProtocol",
91 "Provide",
92 "QueueListenerHandler",
93 "Redirect",
94 "Request",
95 "RequestEncodingType",
96 "Response",
97 "ResponseHeader",
98 "Router",
99 "ScopeType",
100 "ServiceUnavailableException",
101 "StarLiteException",
102 "Starlite",
103 "State",
104 "StaticFilesConfig",
105 "Stream",
106 "Template",
107 "TemplateConfig",
108 "TestClient",
109 "ValidationException",
110 "WebSocket",
111 "WebSocketRoute",
112 "WebsocketRouteHandler",
113 "asgi",
114 "create_test_client",
115 "create_test_request",
116 "delete",
117 "get",
118 "patch",
119 "post",
120 "put",
121 "route",
122 "websocket",
123 ]
124
[end of starlite/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/starlite/__init__.py b/starlite/__init__.py
--- a/starlite/__init__.py
+++ b/starlite/__init__.py
@@ -1,3 +1,5 @@
+from typing import TYPE_CHECKING, Any
+
from starlite.datastructures import File, Redirect, State, Stream, Template
from .app import Starlite
@@ -53,9 +55,12 @@
from .response import Response
from .router import Router
from .routes import BaseRoute, HTTPRoute, WebSocketRoute
-from .testing import TestClient, create_test_client, create_test_request
from .types import MiddlewareProtocol, Partial, ResponseHeader
+if TYPE_CHECKING:
+ from .testing import TestClient, create_test_client, create_test_request
+
+
__all__ = [
"ASGIRouteHandler",
"AbstractAuthenticationMiddleware",
@@ -121,3 +126,17 @@
"route",
"websocket",
]
+
+_dynamic_imports = {"TestClient", "create_test_client", "create_test_request"}
+
+
+# pylint: disable=import-outside-toplevel
+def __getattr__(name: str) -> Any:
+ """Provide lazy importing as per https://peps.python.org/pep-0562/"""
+ if name not in _dynamic_imports:
+ raise AttributeError(f"Module {__package__} has no attribute {name}")
+
+ from . import testing
+
+ attr = globals()[name] = getattr(testing, name)
+ return attr
| {"golden_diff": "diff --git a/starlite/__init__.py b/starlite/__init__.py\n--- a/starlite/__init__.py\n+++ b/starlite/__init__.py\n@@ -1,3 +1,5 @@\n+from typing import TYPE_CHECKING, Any\n+\n from starlite.datastructures import File, Redirect, State, Stream, Template\n \n from .app import Starlite\n@@ -53,9 +55,12 @@\n from .response import Response\n from .router import Router\n from .routes import BaseRoute, HTTPRoute, WebSocketRoute\n-from .testing import TestClient, create_test_client, create_test_request\n from .types import MiddlewareProtocol, Partial, ResponseHeader\n \n+if TYPE_CHECKING:\n+ from .testing import TestClient, create_test_client, create_test_request\n+\n+\n __all__ = [\n \"ASGIRouteHandler\",\n \"AbstractAuthenticationMiddleware\",\n@@ -121,3 +126,17 @@\n \"route\",\n \"websocket\",\n ]\n+\n+_dynamic_imports = {\"TestClient\", \"create_test_client\", \"create_test_request\"}\n+\n+\n+# pylint: disable=import-outside-toplevel\n+def __getattr__(name: str) -> Any:\n+ \"\"\"Provide lazy importing as per https://peps.python.org/pep-0562/\"\"\"\n+ if name not in _dynamic_imports:\n+ raise AttributeError(f\"Module {__package__} has no attribute {name}\")\n+\n+ from . import testing\n+\n+ attr = globals()[name] = getattr(testing, name)\n+ return attr\n", "issue": "Move `requests` to `testing` extra\nI was inspecting starlight dependencies and was confused to see `requests` being required without any obvious reason.\r\n\r\nI found #13 then, but I disagree with the resolution since I believe libs required for testing purposes should not be installed for normal use. Now it only one lib (with several dependencies), but imagine if more dependencies would be added for testing.\r\n\r\n#### What I propose:\r\n1. Move requests from required dependencies to `testing` extra (`pip install starlight[testing]`)\r\n1. Remove import of `starlite.testing` from `starlite` package\r\n2. When starlight is imported explicitly (`from starlight import testint`), check for requests installed. 
if not, raise `RuntimeError(\"To access starlight.testing install starlight with [testing] extra\")`\r\n\r\nHow would `pyproject.toml` of end user look like:\r\n```toml\r\n[tool.poetry.dependencies]\r\npython = \"^3.10\"\r\nstarlite = \"^1.3.9\"\r\n\r\n[tool.poetry.dev-dependencies]\r\nstarlite = {extras = [\"testing\"], version = \"*\"} # whatever version is installed + testing dependencies\r\npytest = \"^5.2\"\r\n```\r\n\r\n\r\nI can send a PR if changes are welcomed.\n", "before_files": [{"content": "from starlite.datastructures import File, Redirect, State, Stream, Template\n\nfrom .app import Starlite\nfrom .config import (\n CacheConfig,\n CORSConfig,\n OpenAPIConfig,\n StaticFilesConfig,\n TemplateConfig,\n)\nfrom .connection import Request, WebSocket\nfrom .controller import Controller\nfrom .dto import DTOFactory\nfrom .enums import (\n HttpMethod,\n MediaType,\n OpenAPIMediaType,\n RequestEncodingType,\n ScopeType,\n)\nfrom .exceptions import (\n HTTPException,\n ImproperlyConfiguredException,\n InternalServerException,\n MissingDependencyException,\n NotAuthorizedException,\n NotFoundException,\n PermissionDeniedException,\n ServiceUnavailableException,\n StarLiteException,\n ValidationException,\n)\nfrom .handlers import (\n ASGIRouteHandler,\n BaseRouteHandler,\n HTTPRouteHandler,\n WebsocketRouteHandler,\n asgi,\n delete,\n get,\n patch,\n post,\n put,\n route,\n websocket,\n)\nfrom .logging import LoggingConfig, QueueListenerHandler\nfrom .middleware import AbstractAuthenticationMiddleware, AuthenticationResult\nfrom .openapi.controller import OpenAPIController\nfrom .params import Body, Dependency, Parameter\nfrom .plugins import PluginProtocol\nfrom .provide import Provide\nfrom .response import Response\nfrom .router import Router\nfrom .routes import BaseRoute, HTTPRoute, WebSocketRoute\nfrom .testing import TestClient, create_test_client, create_test_request\nfrom .types import MiddlewareProtocol, Partial, ResponseHeader\n\n__all__ = [\n \"ASGIRouteHandler\",\n \"AbstractAuthenticationMiddleware\",\n \"AuthenticationResult\",\n \"BaseRoute\",\n \"BaseRouteHandler\",\n \"Body\",\n \"CORSConfig\",\n \"CacheConfig\",\n \"Controller\",\n \"Dependency\",\n \"DTOFactory\",\n \"File\",\n \"HTTPException\",\n \"HTTPRoute\",\n \"HTTPRouteHandler\",\n \"HttpMethod\",\n \"ImproperlyConfiguredException\",\n \"InternalServerException\",\n \"LoggingConfig\",\n \"MediaType\",\n \"MiddlewareProtocol\",\n \"MissingDependencyException\",\n \"NotAuthorizedException\",\n \"NotFoundException\",\n \"OpenAPIConfig\",\n \"OpenAPIController\",\n \"OpenAPIMediaType\",\n \"Parameter\",\n \"Partial\",\n \"PermissionDeniedException\",\n \"PluginProtocol\",\n \"Provide\",\n \"QueueListenerHandler\",\n \"Redirect\",\n \"Request\",\n \"RequestEncodingType\",\n \"Response\",\n \"ResponseHeader\",\n \"Router\",\n \"ScopeType\",\n \"ServiceUnavailableException\",\n \"StarLiteException\",\n \"Starlite\",\n \"State\",\n \"StaticFilesConfig\",\n \"Stream\",\n \"Template\",\n \"TemplateConfig\",\n \"TestClient\",\n \"ValidationException\",\n \"WebSocket\",\n \"WebSocketRoute\",\n \"WebsocketRouteHandler\",\n \"asgi\",\n \"create_test_client\",\n \"create_test_request\",\n \"delete\",\n \"get\",\n \"patch\",\n \"post\",\n \"put\",\n \"route\",\n \"websocket\",\n]\n", "path": "starlite/__init__.py"}]} | 1,709 | 340 |
gh_patches_debug_41650 | rasdani/github-patches | git_diff | jupyterhub__jupyterhub-4774 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Upgrade non-admin parts of UI to Bootstrap 5
### Proposed change
Currently, we are on Bootstrap 3, which is almost 10 years old. We should upgrade to Bootstrap 5!
This issue is focused on bootstrap 5 for our non-admin panel, as the admin panel is using react and already on bootstrap 4.
## Switch to webpack for CSS customization
We currently customize our bootstrap CSS with custom Less files. We should switch to using webpack instead, which would allow us to use https://getbootstrap.com/docs/5.0/getting-started/webpack/ for customization.
## Files to be modified
Now that the admin panel is react, this is simpler. The following template files need to be modified to support bootstrap 5:
- [ ] 404.html
- [ ] admin.html
- [ ] error.html
- [ ] home.html
- [ ] login.html
- [ ] logout.html
- [ ] not_running.html
- [ ] oauth.html
- [ ] page.html
- [ ] spawn.html
- [ ] spawn_pending.html
- [ ] stop_pending.html
- [ ] token.html
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python3
2 # Copyright (c) Jupyter Development Team.
3 # Distributed under the terms of the Modified BSD License.
4
5 import os
6 import shutil
7 import sys
8 from subprocess import check_call
9
10 from setuptools import Command, setup
11 from setuptools.command.bdist_egg import bdist_egg
12 from setuptools.command.build_py import build_py
13 from setuptools.command.develop import develop
14 from setuptools.command.sdist import sdist
15
16 shell = False
17 if os.name in ('nt', 'dos'):
18 shell = True
19 warning = "WARNING: Windows is not officially supported"
20 print(warning, file=sys.stderr)
21
22
23 pjoin = os.path.join
24
25 here = os.path.abspath(os.path.dirname(__file__))
26 share_jupyterhub = pjoin(here, 'share', 'jupyterhub')
27 static = pjoin(share_jupyterhub, 'static')
28
29 is_repo = os.path.exists(pjoin(here, '.git'))
30
31 # Build basic package data, etc.
32
33
34 def get_data_files():
35 """Get data files in share/jupyter"""
36
37 data_files = []
38 for d, dirs, filenames in os.walk(share_jupyterhub):
39 rel_d = os.path.relpath(d, here)
40 data_files.append((rel_d, [os.path.join(rel_d, f) for f in filenames]))
41 return data_files
42
43
44 def mtime(path):
45 """shorthand for mtime"""
46 return os.stat(path).st_mtime
47
48
49 def recursive_mtime(path):
50 """Recursively get newest mtime of files"""
51 if os.path.isfile(path):
52 return mtime(path)
53 current = 0
54 for dirname, _, filenames in os.walk(path):
55 if filenames:
56 current = max(
57 current, max(mtime(os.path.join(dirname, f)) for f in filenames)
58 )
59 return current
60
61
62 class BaseCommand(Command):
63 """Dumb empty command because Command needs subclasses to override too much"""
64
65 user_options = []
66
67 def initialize_options(self):
68 pass
69
70 def finalize_options(self):
71 pass
72
73 def get_inputs(self):
74 return []
75
76 def get_outputs(self):
77 return []
78
79
80 class NPM(BaseCommand):
81 description = "fetch static client-side components with bower"
82
83 user_options = []
84 node_modules = pjoin(here, 'node_modules')
85 bower_dir = pjoin(static, 'components')
86
87 def should_run(self):
88 if not os.path.exists(self.bower_dir):
89 return True
90 if not os.path.exists(self.node_modules):
91 return True
92 if mtime(self.bower_dir) < mtime(self.node_modules):
93 return True
94 return mtime(self.node_modules) < mtime(pjoin(here, 'package.json'))
95
96 def run(self):
97 if not self.should_run():
98 print("npm dependencies up to date")
99 return
100
101 print("installing js dependencies with npm")
102 check_call(
103 ['npm', 'install', '--progress=false', '--unsafe-perm'],
104 cwd=here,
105 shell=shell,
106 )
107 os.utime(self.node_modules)
108
109 os.utime(self.bower_dir)
110 # update data-files in case this created new files
111 self.distribution.data_files = get_data_files()
112 assert not self.should_run(), 'NPM.run failed'
113
114
115 class CSS(BaseCommand):
116 description = "compile CSS from LESS"
117
118 def should_run(self):
119 """Does less need to run?"""
120 # from IPython.html.tasks.py
121
122 css_targets = [pjoin(static, 'css', 'style.min.css')]
123 css_maps = [t + '.map' for t in css_targets]
124 targets = css_targets + css_maps
125 if not all(os.path.exists(t) for t in targets):
126 # some generated files don't exist
127 return True
128 earliest_target = sorted(mtime(t) for t in targets)[0]
129
130 # check if any .less files are newer than the generated targets
131 for dirpath, dirnames, filenames in os.walk(static):
132 for f in filenames:
133 if f.endswith('.less'):
134 path = pjoin(static, dirpath, f)
135 timestamp = mtime(path)
136 if timestamp > earliest_target:
137 return True
138
139 return False
140
141 def run(self):
142 if not self.should_run():
143 print("CSS up-to-date")
144 return
145
146 self.run_command('js')
147 print("Building css with less")
148
149 style_less = pjoin(static, 'less', 'style.less')
150 style_css = pjoin(static, 'css', 'style.min.css')
151 sourcemap = style_css + '.map'
152
153 args = [
154 'npm',
155 'run',
156 'lessc',
157 '--',
158 '--clean-css',
159 f'--source-map-basepath={static}',
160 f'--source-map={sourcemap}',
161 '--source-map-rootpath=../',
162 style_less,
163 style_css,
164 ]
165 try:
166 check_call(args, cwd=here, shell=shell)
167 except OSError as e:
168 print("Failed to run lessc: %s" % e, file=sys.stderr)
169 print("You can install js dependencies with `npm install`", file=sys.stderr)
170 raise
171 # update data-files in case this created new files
172 self.distribution.data_files = get_data_files()
173 assert not self.should_run(), 'CSS.run failed'
174
175
176 class JSX(BaseCommand):
177 description = "build admin app"
178
179 jsx_dir = pjoin(here, 'jsx')
180 js_target = pjoin(static, 'js', 'admin-react.js')
181
182 def should_run(self):
183 if os.getenv('READTHEDOCS'):
184 # yarn not available on RTD
185 return False
186
187 if not os.path.exists(self.js_target):
188 return True
189
190 js_target_mtime = mtime(self.js_target)
191 jsx_mtime = recursive_mtime(self.jsx_dir)
192 if js_target_mtime < jsx_mtime:
193 return True
194 return False
195
196 def run(self):
197 if not self.should_run():
198 print("JSX admin app is up to date")
199 return
200
201 if not shutil.which('npm'):
202 raise Exception('JSX needs to be updated but npm is not installed')
203
204 print("Installing JSX admin app requirements")
205 check_call(
206 ['npm', 'install', '--progress=false', '--unsafe-perm'],
207 cwd=self.jsx_dir,
208 shell=shell,
209 )
210
211 print("Building JSX admin app")
212 check_call(
213 ["npm", "run", "build"],
214 cwd=self.jsx_dir,
215 shell=shell,
216 )
217
218 # update data-files in case this created new files
219 self.distribution.data_files = get_data_files()
220 assert not self.should_run(), 'JSX.run failed'
221
222
223 def js_css_first(cls, strict=True):
224 class Command(cls):
225 def run(self):
226 try:
227 self.run_command('js')
228 self.run_command('css')
229 self.run_command('jsx')
230 except Exception:
231 if strict:
232 raise
233 else:
234 pass
235 return super().run()
236
237 return Command
238
239
240 class bdist_egg_disabled(bdist_egg):
241 """Disabled version of bdist_egg
242
243 Prevents setup.py install from performing setuptools' default easy_install,
244 which it should never ever do.
245 """
246
247 def run(self):
248 sys.exit(
249 "Aborting implicit building of eggs. Use `pip install .` to install from source."
250 )
251
252
253 class develop_js_css(develop):
254 def run(self):
255 if not self.uninstall:
256 self.distribution.run_command('js')
257 self.distribution.run_command('css')
258 super().run()
259
260
261 cmdclass = {
262 'js': NPM,
263 'css': CSS,
264 'jsx': JSX,
265 'build_py': js_css_first(build_py, strict=is_repo),
266 'sdist': js_css_first(sdist, strict=True),
267 'bdist_egg': bdist_egg if 'bdist_egg' in sys.argv else bdist_egg_disabled,
268 'develop': develop_js_css,
269 }
270
271 # run setup
272
273
274 def main():
275 setup(
276 cmdclass=cmdclass,
277 data_files=get_data_files(),
278 )
279
280
281 if __name__ == '__main__':
282 main()
283
[end of setup.py]
[start of jupyterhub/handlers/static.py]
1 # Copyright (c) Jupyter Development Team.
2 # Distributed under the terms of the Modified BSD License.
3 import os
4
5 from tornado.web import StaticFileHandler
6
7
8 class CacheControlStaticFilesHandler(StaticFileHandler):
9 """StaticFileHandler subclass that sets Cache-Control: no-cache without `?v=`
10
11 rather than relying on default browser cache behavior.
12 """
13
14 def compute_etag(self):
15 return None
16
17 def set_extra_headers(self, path):
18 if "v" not in self.request.arguments:
19 self.add_header("Cache-Control", "no-cache")
20
21
22 class LogoHandler(StaticFileHandler):
23 """A singular handler for serving the logo."""
24
25 def get(self):
26 return super().get('')
27
28 @classmethod
29 def get_absolute_path(cls, root, path):
30 """We only serve one file, ignore relative path"""
31 return os.path.abspath(root)
32
[end of jupyterhub/handlers/static.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/jupyterhub/handlers/static.py b/jupyterhub/handlers/static.py
--- a/jupyterhub/handlers/static.py
+++ b/jupyterhub/handlers/static.py
@@ -15,7 +15,9 @@
return None
def set_extra_headers(self, path):
- if "v" not in self.request.arguments:
+ if "v" not in self.request.arguments or self.settings.get(
+ "no_cache_static", False
+ ):
self.add_header("Cache-Control", "no-cache")
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -113,27 +113,34 @@
class CSS(BaseCommand):
- description = "compile CSS from LESS"
+ description = "compile CSS"
def should_run(self):
- """Does less need to run?"""
- # from IPython.html.tasks.py
-
+ """Does CSS need to run?"""
css_targets = [pjoin(static, 'css', 'style.min.css')]
css_maps = [t + '.map' for t in css_targets]
targets = css_targets + css_maps
- if not all(os.path.exists(t) for t in targets):
- # some generated files don't exist
- return True
- earliest_target = sorted(mtime(t) for t in targets)[0]
-
- # check if any .less files are newer than the generated targets
+ earliest_target_mtime = float('inf')
+ earliest_target_name = ''
+ for t in targets:
+ if not os.path.exists(t):
+ print(f"Need to build css target: {t}")
+ return True
+ target_mtime = mtime(t)
+ if target_mtime < earliest_target_mtime:
+ earliest_target_name = t
+ earliest_target_mtime = target_mtime
+
+ # check if any .scss files are newer than the generated targets
for dirpath, dirnames, filenames in os.walk(static):
for f in filenames:
- if f.endswith('.less'):
+ if f.endswith('.scss'):
path = pjoin(static, dirpath, f)
timestamp = mtime(path)
- if timestamp > earliest_target:
+ if timestamp > earliest_target_mtime:
+ print(
+ f"mtime for {path} > {earliest_target_name}, needs update"
+ )
return True
return False
@@ -144,33 +151,18 @@
return
self.run_command('js')
- print("Building css with less")
-
- style_less = pjoin(static, 'less', 'style.less')
- style_css = pjoin(static, 'css', 'style.min.css')
- sourcemap = style_css + '.map'
-
- args = [
- 'npm',
- 'run',
- 'lessc',
- '--',
- '--clean-css',
- f'--source-map-basepath={static}',
- f'--source-map={sourcemap}',
- '--source-map-rootpath=../',
- style_less,
- style_css,
- ]
+ print("Building css")
+
+ args = ['npm', 'run', 'css']
try:
check_call(args, cwd=here, shell=shell)
except OSError as e:
- print("Failed to run lessc: %s" % e, file=sys.stderr)
+ print("Failed to build css: %s" % e, file=sys.stderr)
print("You can install js dependencies with `npm install`", file=sys.stderr)
raise
# update data-files in case this created new files
self.distribution.data_files = get_data_files()
- assert not self.should_run(), 'CSS.run failed'
+ assert not self.should_run(), 'CSS.run did not produce up-to-date output'
class JSX(BaseCommand):
| {"golden_diff": "diff --git a/jupyterhub/handlers/static.py b/jupyterhub/handlers/static.py\n--- a/jupyterhub/handlers/static.py\n+++ b/jupyterhub/handlers/static.py\n@@ -15,7 +15,9 @@\n return None\n \n def set_extra_headers(self, path):\n- if \"v\" not in self.request.arguments:\n+ if \"v\" not in self.request.arguments or self.settings.get(\n+ \"no_cache_static\", False\n+ ):\n self.add_header(\"Cache-Control\", \"no-cache\")\n \n \ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -113,27 +113,34 @@\n \n \n class CSS(BaseCommand):\n- description = \"compile CSS from LESS\"\n+ description = \"compile CSS\"\n \n def should_run(self):\n- \"\"\"Does less need to run?\"\"\"\n- # from IPython.html.tasks.py\n-\n+ \"\"\"Does CSS need to run?\"\"\"\n css_targets = [pjoin(static, 'css', 'style.min.css')]\n css_maps = [t + '.map' for t in css_targets]\n targets = css_targets + css_maps\n- if not all(os.path.exists(t) for t in targets):\n- # some generated files don't exist\n- return True\n- earliest_target = sorted(mtime(t) for t in targets)[0]\n-\n- # check if any .less files are newer than the generated targets\n+ earliest_target_mtime = float('inf')\n+ earliest_target_name = ''\n+ for t in targets:\n+ if not os.path.exists(t):\n+ print(f\"Need to build css target: {t}\")\n+ return True\n+ target_mtime = mtime(t)\n+ if target_mtime < earliest_target_mtime:\n+ earliest_target_name = t\n+ earliest_target_mtime = target_mtime\n+\n+ # check if any .scss files are newer than the generated targets\n for dirpath, dirnames, filenames in os.walk(static):\n for f in filenames:\n- if f.endswith('.less'):\n+ if f.endswith('.scss'):\n path = pjoin(static, dirpath, f)\n timestamp = mtime(path)\n- if timestamp > earliest_target:\n+ if timestamp > earliest_target_mtime:\n+ print(\n+ f\"mtime for {path} > {earliest_target_name}, needs update\"\n+ )\n return True\n \n return False\n@@ -144,33 +151,18 @@\n return\n \n self.run_command('js')\n- print(\"Building css with less\")\n-\n- style_less = pjoin(static, 'less', 'style.less')\n- style_css = pjoin(static, 'css', 'style.min.css')\n- sourcemap = style_css + '.map'\n-\n- args = [\n- 'npm',\n- 'run',\n- 'lessc',\n- '--',\n- '--clean-css',\n- f'--source-map-basepath={static}',\n- f'--source-map={sourcemap}',\n- '--source-map-rootpath=../',\n- style_less,\n- style_css,\n- ]\n+ print(\"Building css\")\n+\n+ args = ['npm', 'run', 'css']\n try:\n check_call(args, cwd=here, shell=shell)\n except OSError as e:\n- print(\"Failed to run lessc: %s\" % e, file=sys.stderr)\n+ print(\"Failed to build css: %s\" % e, file=sys.stderr)\n print(\"You can install js dependencies with `npm install`\", file=sys.stderr)\n raise\n # update data-files in case this created new files\n self.distribution.data_files = get_data_files()\n- assert not self.should_run(), 'CSS.run failed'\n+ assert not self.should_run(), 'CSS.run did not produce up-to-date output'\n \n \n class JSX(BaseCommand):\n", "issue": "Upgrade non-admin parts of UI to Bootstrap 5\n### Proposed change\r\n\r\nCurrently, we are on Bootstrap 3, which is almost 10 years old. We should upgrade to Bootstrap 5!\r\n\r\nThis issue is focused on bootstrap 5 for our non-admin panel, as the admin panel is using react and already on bootstrap 4.\r\n\r\n## Switch to webpack for CSS customization\r\n\r\nWe customize our CSS with bootstrap with custom less stuff. 
We should switch to using webpack instead, which would allow us to use https://getbootstrap.com/docs/5.0/getting-started/webpack/ for customization.\r\n\r\n## Files to be modified\r\n\r\nNow that the admin panel is react, this is simpler. The following template files need to be modified to support bootstrap 5\r\n\r\n- [ ] 404.html\r\n- [ ] admin.html\r\n- [ ] error.html\r\n- [ ] home.html\r\n- [ ] login.html\r\n- [ ] logout.html\r\n- [ ] not_running.html\r\n- [ ] oauth.html\r\n- [ ] page.html\r\n- [ ] spawn.html\r\n- [ ] spawn_pending.html\r\n- [ ] stop_pending.html\r\n- [ ] token.html\n", "before_files": [{"content": "#!/usr/bin/env python3\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nimport os\nimport shutil\nimport sys\nfrom subprocess import check_call\n\nfrom setuptools import Command, setup\nfrom setuptools.command.bdist_egg import bdist_egg\nfrom setuptools.command.build_py import build_py\nfrom setuptools.command.develop import develop\nfrom setuptools.command.sdist import sdist\n\nshell = False\nif os.name in ('nt', 'dos'):\n shell = True\n warning = \"WARNING: Windows is not officially supported\"\n print(warning, file=sys.stderr)\n\n\npjoin = os.path.join\n\nhere = os.path.abspath(os.path.dirname(__file__))\nshare_jupyterhub = pjoin(here, 'share', 'jupyterhub')\nstatic = pjoin(share_jupyterhub, 'static')\n\nis_repo = os.path.exists(pjoin(here, '.git'))\n\n# Build basic package data, etc.\n\n\ndef get_data_files():\n \"\"\"Get data files in share/jupyter\"\"\"\n\n data_files = []\n for d, dirs, filenames in os.walk(share_jupyterhub):\n rel_d = os.path.relpath(d, here)\n data_files.append((rel_d, [os.path.join(rel_d, f) for f in filenames]))\n return data_files\n\n\ndef mtime(path):\n \"\"\"shorthand for mtime\"\"\"\n return os.stat(path).st_mtime\n\n\ndef recursive_mtime(path):\n \"\"\"Recursively get newest mtime of files\"\"\"\n if os.path.isfile(path):\n return mtime(path)\n current = 0\n for dirname, _, filenames in os.walk(path):\n if filenames:\n current = max(\n current, max(mtime(os.path.join(dirname, f)) for f in filenames)\n )\n return current\n\n\nclass BaseCommand(Command):\n \"\"\"Dumb empty command because Command needs subclasses to override too much\"\"\"\n\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def get_inputs(self):\n return []\n\n def get_outputs(self):\n return []\n\n\nclass NPM(BaseCommand):\n description = \"fetch static client-side components with bower\"\n\n user_options = []\n node_modules = pjoin(here, 'node_modules')\n bower_dir = pjoin(static, 'components')\n\n def should_run(self):\n if not os.path.exists(self.bower_dir):\n return True\n if not os.path.exists(self.node_modules):\n return True\n if mtime(self.bower_dir) < mtime(self.node_modules):\n return True\n return mtime(self.node_modules) < mtime(pjoin(here, 'package.json'))\n\n def run(self):\n if not self.should_run():\n print(\"npm dependencies up to date\")\n return\n\n print(\"installing js dependencies with npm\")\n check_call(\n ['npm', 'install', '--progress=false', '--unsafe-perm'],\n cwd=here,\n shell=shell,\n )\n os.utime(self.node_modules)\n\n os.utime(self.bower_dir)\n # update data-files in case this created new files\n self.distribution.data_files = get_data_files()\n assert not self.should_run(), 'NPM.run failed'\n\n\nclass CSS(BaseCommand):\n description = \"compile CSS from LESS\"\n\n def should_run(self):\n \"\"\"Does less need to run?\"\"\"\n # from 
IPython.html.tasks.py\n\n css_targets = [pjoin(static, 'css', 'style.min.css')]\n css_maps = [t + '.map' for t in css_targets]\n targets = css_targets + css_maps\n if not all(os.path.exists(t) for t in targets):\n # some generated files don't exist\n return True\n earliest_target = sorted(mtime(t) for t in targets)[0]\n\n # check if any .less files are newer than the generated targets\n for dirpath, dirnames, filenames in os.walk(static):\n for f in filenames:\n if f.endswith('.less'):\n path = pjoin(static, dirpath, f)\n timestamp = mtime(path)\n if timestamp > earliest_target:\n return True\n\n return False\n\n def run(self):\n if not self.should_run():\n print(\"CSS up-to-date\")\n return\n\n self.run_command('js')\n print(\"Building css with less\")\n\n style_less = pjoin(static, 'less', 'style.less')\n style_css = pjoin(static, 'css', 'style.min.css')\n sourcemap = style_css + '.map'\n\n args = [\n 'npm',\n 'run',\n 'lessc',\n '--',\n '--clean-css',\n f'--source-map-basepath={static}',\n f'--source-map={sourcemap}',\n '--source-map-rootpath=../',\n style_less,\n style_css,\n ]\n try:\n check_call(args, cwd=here, shell=shell)\n except OSError as e:\n print(\"Failed to run lessc: %s\" % e, file=sys.stderr)\n print(\"You can install js dependencies with `npm install`\", file=sys.stderr)\n raise\n # update data-files in case this created new files\n self.distribution.data_files = get_data_files()\n assert not self.should_run(), 'CSS.run failed'\n\n\nclass JSX(BaseCommand):\n description = \"build admin app\"\n\n jsx_dir = pjoin(here, 'jsx')\n js_target = pjoin(static, 'js', 'admin-react.js')\n\n def should_run(self):\n if os.getenv('READTHEDOCS'):\n # yarn not available on RTD\n return False\n\n if not os.path.exists(self.js_target):\n return True\n\n js_target_mtime = mtime(self.js_target)\n jsx_mtime = recursive_mtime(self.jsx_dir)\n if js_target_mtime < jsx_mtime:\n return True\n return False\n\n def run(self):\n if not self.should_run():\n print(\"JSX admin app is up to date\")\n return\n\n if not shutil.which('npm'):\n raise Exception('JSX needs to be updated but npm is not installed')\n\n print(\"Installing JSX admin app requirements\")\n check_call(\n ['npm', 'install', '--progress=false', '--unsafe-perm'],\n cwd=self.jsx_dir,\n shell=shell,\n )\n\n print(\"Building JSX admin app\")\n check_call(\n [\"npm\", \"run\", \"build\"],\n cwd=self.jsx_dir,\n shell=shell,\n )\n\n # update data-files in case this created new files\n self.distribution.data_files = get_data_files()\n assert not self.should_run(), 'JSX.run failed'\n\n\ndef js_css_first(cls, strict=True):\n class Command(cls):\n def run(self):\n try:\n self.run_command('js')\n self.run_command('css')\n self.run_command('jsx')\n except Exception:\n if strict:\n raise\n else:\n pass\n return super().run()\n\n return Command\n\n\nclass bdist_egg_disabled(bdist_egg):\n \"\"\"Disabled version of bdist_egg\n\n Prevents setup.py install from performing setuptools' default easy_install,\n which it should never ever do.\n \"\"\"\n\n def run(self):\n sys.exit(\n \"Aborting implicit building of eggs. 
Use `pip install .` to install from source.\"\n )\n\n\nclass develop_js_css(develop):\n def run(self):\n if not self.uninstall:\n self.distribution.run_command('js')\n self.distribution.run_command('css')\n super().run()\n\n\ncmdclass = {\n 'js': NPM,\n 'css': CSS,\n 'jsx': JSX,\n 'build_py': js_css_first(build_py, strict=is_repo),\n 'sdist': js_css_first(sdist, strict=True),\n 'bdist_egg': bdist_egg if 'bdist_egg' in sys.argv else bdist_egg_disabled,\n 'develop': develop_js_css,\n}\n\n# run setup\n\n\ndef main():\n setup(\n cmdclass=cmdclass,\n data_files=get_data_files(),\n )\n\n\nif __name__ == '__main__':\n main()\n", "path": "setup.py"}, {"content": "# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\nimport os\n\nfrom tornado.web import StaticFileHandler\n\n\nclass CacheControlStaticFilesHandler(StaticFileHandler):\n \"\"\"StaticFileHandler subclass that sets Cache-Control: no-cache without `?v=`\n\n rather than relying on default browser cache behavior.\n \"\"\"\n\n def compute_etag(self):\n return None\n\n def set_extra_headers(self, path):\n if \"v\" not in self.request.arguments:\n self.add_header(\"Cache-Control\", \"no-cache\")\n\n\nclass LogoHandler(StaticFileHandler):\n \"\"\"A singular handler for serving the logo.\"\"\"\n\n def get(self):\n return super().get('')\n\n @classmethod\n def get_absolute_path(cls, root, path):\n \"\"\"We only serve one file, ignore relative path\"\"\"\n return os.path.abspath(root)\n", "path": "jupyterhub/handlers/static.py"}]} | 3,568 | 862 |
gh_patches_debug_13005 | rasdani/github-patches | git_diff | tensorflow__tfx-25 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
base_component.BaseComponent.__str__ method raising KeyError
I got a `KeyError` when calling this method: `base_component.BaseComponent.__str__`
Here is the code to reproduce:
```python
import os
from tfx.utils.dsl_utils import csv_input
from tfx.components.example_gen.csv_example_gen.component import CsvExampleGen
_taxi_root = os.path.join(os.environ['HOME'], 'taxi')
_data_root = os.path.join(_taxi_root, 'data/simple')
examples = csv_input(_data_root)
example_gen = CsvExampleGen(input_base=examples)
print(example_gen)
```
The error trace is:
```
/Users/alelevier/Documents/github/tfx/tfx/components/base/base_component.pyc in __str__(self)
89 input_dict=self.input_dict,
90 outputs=self.outputs,
---> 91 exec_properties=self.exec_properties)
92
93 def __repr__(self):
KeyError: '\n component_name'
```
I looked at the method; it needs to use double `{{` and `}}`, so change it from:
```
def __str__(self):
return """
{
component_name: {component_name},
unique_name: {unique_name},
driver: {driver},
executor: {executor},
input_dict: {input_dict},
outputs: {outputs},
exec_properties: {exec_properties}
}
""".format( # pylint: disable=missing-format-argument-key
component_name=self.component_name,
unique_name=self.unique_name,
driver=self.driver,
executor=self.executor,
input_dict=self.input_dict,
outputs=self.outputs,
exec_properties=self.exec_properties)
```
To:
```
def __str__(self):
return """
{{
component_name: {component_name},
unique_name: {unique_name},
driver: {driver},
executor: {executor},
input_dict: {input_dict},
outputs: {outputs},
exec_properties: {exec_properties}
}}
""".format( # pylint: disable=missing-format-argument-key
component_name=self.component_name,
unique_name=self.unique_name,
driver=self.driver,
executor=self.executor,
input_dict=self.input_dict,
outputs=self.outputs,
exec_properties=self.exec_properties)
```
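
A quick way to see why the doubled braces matter, using a stripped-down version of the same template string (plain REPL-style Python, not TFX code):

```python
broken = "\n{\n  component_name: {component_name}\n}\n"
fixed = "\n{{\n  component_name: {component_name}\n}}\n"

try:
    broken.format(component_name="CsvExampleGen")
except KeyError as err:
    # the literal outer "{" is parsed as the start of a replacement field,
    # so format() looks up the key '\n  component_name' and fails
    print("KeyError:", err)

# doubled braces are emitted as literal "{" / "}" and the placeholder still works
print(fixed.format(component_name="CsvExampleGen"))
```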
</issue>
<code>
[start of tfx/components/base/base_component.py]
1 # Copyright 2019 Google LLC. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Base class for all TFX components."""
15
16 from __future__ import absolute_import
17 from __future__ import division
18 from __future__ import print_function
19
20 import abc
21 from six import with_metaclass
22 from typing import Any
23 from typing import Dict
24 from typing import Optional
25 from typing import Text
26
27 from tfx.utils import channel
28
29
30 class ComponentOutputs(object):
31 """Helper class to wrap outputs from TFX components."""
32
33 def __init__(self, d):
34 self.__dict__ = d
35
36 def get_all(self):
37 return self.__dict__
38
39
40 class BaseComponent(with_metaclass(abc.ABCMeta, object)):
41 """Base TFX component.
42
43 This is the parent class of any TFX component.
44
45 Attributes:
46 component_name: Name of the component, should be unique per component class.
47 unique_name: Unique name for every component class instance.
48 driver: Driver class to handle pre-execution behaviors in a component.
49 executor: Executor class to do the real execution work.
50 input_dict: A [Text -> Channel] dict serving as the inputs to the component.
51 exec_properties: A [Text -> Any] dict serving as additional properties
52 needed for execution.
53 outputs: Optional Channel destinations of the component.
54 """
55
56 def __init__(self,
57 component_name,
58 driver,
59 executor,
60 input_dict,
61 exec_properties,
62 unique_name = '',
63 outputs = ComponentOutputs({})):
64 self.component_name = component_name
65 self.driver = driver
66 self.executor = executor
67 self.input_dict = input_dict
68 self.exec_properties = exec_properties
69 self.unique_name = unique_name
70 self.outputs = outputs or self._create_outputs()
71 self._type_check(self.input_dict, self.exec_properties)
72
73 def __str__(self):
74 return """
75 {
76 component_name: {component_name},
77 unique_name: {unique_name},
78 driver: {driver},
79 executor: {executor},
80 input_dict: {input_dict},
81 outputs: {outputs},
82 exec_properties: {exec_properties}
83 }
84 """.format( # pylint: disable=missing-format-argument-key
85 component_name=self.component_name,
86 unique_name=self.unique_name,
87 driver=self.driver,
88 executor=self.executor,
89 input_dict=self.input_dict,
90 outputs=self.outputs,
91 exec_properties=self.exec_properties)
92
93 def __repr__(self):
94 return self.__str__()
95
96 @abc.abstractmethod
97 def _create_outputs(self):
98 """Creates outputs placeholder for components.
99
100 Returns:
101 ComponentOutputs object containing the dict of [Text -> Channel]
102 """
103 raise NotImplementedError
104
105 @abc.abstractmethod
106 def _type_check(self, input_dict,
107 exec_properties):
108 """Does type checking for the inputs and exec_properties.
109
110 Args:
111 input_dict: A Dict[Text, Channel] as the inputs of the Component.
112 exec_properties: A Dict[Text, Any] as the execution properties of the
113 component.
114 """
115 raise NotImplementedError
116
[end of tfx/components/base/base_component.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/tfx/components/base/base_component.py b/tfx/components/base/base_component.py
--- a/tfx/components/base/base_component.py
+++ b/tfx/components/base/base_component.py
@@ -72,7 +72,7 @@
def __str__(self):
return """
-{
+{{
component_name: {component_name},
unique_name: {unique_name},
driver: {driver},
@@ -80,7 +80,7 @@
input_dict: {input_dict},
outputs: {outputs},
exec_properties: {exec_properties}
-}
+}}
""".format( # pylint: disable=missing-format-argument-key
component_name=self.component_name,
unique_name=self.unique_name,
| {"golden_diff": "diff --git a/tfx/components/base/base_component.py b/tfx/components/base/base_component.py\n--- a/tfx/components/base/base_component.py\n+++ b/tfx/components/base/base_component.py\n@@ -72,7 +72,7 @@\n \n def __str__(self):\n return \"\"\"\n-{\n+{{\n component_name: {component_name},\n unique_name: {unique_name},\n driver: {driver},\n@@ -80,7 +80,7 @@\n input_dict: {input_dict},\n outputs: {outputs},\n exec_properties: {exec_properties}\n-}\n+}}\n \"\"\".format( # pylint: disable=missing-format-argument-key\n component_name=self.component_name,\n unique_name=self.unique_name,\n", "issue": "base_component.BaseComponent.__str__ method raising KeyError\nI got a `KeyError` when calling this method: `base_component.BaseComponent.__str__`\r\n\r\nHere is the code to reproduce:\r\n\r\n```python\r\nimport os\r\nfrom tfx.utils.dsl_utils import csv_input\r\nfrom tfx.components.example_gen.csv_example_gen.component import CsvExampleGen\r\n\r\n_taxi_root = os.path.join(os.environ['HOME'], 'taxi')\r\n_data_root = os.path.join(_taxi_root, 'data/simple')\r\nexamples = csv_input(_data_root)\r\nexample_gen = CsvExampleGen(input_base=examples)\r\nprint(example_gen)\r\n```\r\n\r\nThe error trace is:\r\n\r\n```\r\n/Users/alelevier/Documents/github/tfx/tfx/components/base/base_component.pyc in __str__(self)\r\n 89 input_dict=self.input_dict,\r\n 90 outputs=self.outputs,\r\n---> 91 exec_properties=self.exec_properties)\r\n 92 \r\n 93 def __repr__(self):\r\n\r\nKeyError: '\\n component_name'\r\n```\r\n\r\nI looked at the method, it needs use double `{{` and `}}` so change from:\r\n\r\n```\r\n def __str__(self):\r\n return \"\"\"\r\n{\r\n component_name: {component_name},\r\n unique_name: {unique_name},\r\n driver: {driver},\r\n executor: {executor},\r\n input_dict: {input_dict},\r\n outputs: {outputs},\r\n exec_properties: {exec_properties}\r\n}\r\n \"\"\".format( # pylint: disable=missing-format-argument-key\r\n component_name=self.component_name,\r\n unique_name=self.unique_name,\r\n driver=self.driver,\r\n executor=self.executor,\r\n input_dict=self.input_dict,\r\n outputs=self.outputs,\r\n exec_properties=self.exec_properties)\r\n```\r\n\r\nTo:\r\n\r\n```\r\n def __str__(self):\r\n return \"\"\"\r\n{{\r\n component_name: {component_name},\r\n unique_name: {unique_name},\r\n driver: {driver},\r\n executor: {executor},\r\n input_dict: {input_dict},\r\n outputs: {outputs},\r\n exec_properties: {exec_properties}\r\n}}\r\n \"\"\".format( # pylint: disable=missing-format-argument-key\r\n component_name=self.component_name,\r\n unique_name=self.unique_name,\r\n driver=self.driver,\r\n executor=self.executor,\r\n input_dict=self.input_dict,\r\n outputs=self.outputs,\r\n exec_properties=self.exec_properties)\r\n```\n", "before_files": [{"content": "# Copyright 2019 Google LLC. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Base class for all TFX components.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nfrom six import with_metaclass\nfrom typing import Any\nfrom typing import Dict\nfrom typing import Optional\nfrom typing import Text\n\nfrom tfx.utils import channel\n\n\nclass ComponentOutputs(object):\n \"\"\"Helper class to wrap outputs from TFX components.\"\"\"\n\n def __init__(self, d):\n self.__dict__ = d\n\n def get_all(self):\n return self.__dict__\n\n\nclass BaseComponent(with_metaclass(abc.ABCMeta, object)):\n \"\"\"Base TFX component.\n\n This is the parent class of any TFX component.\n\n Attributes:\n component_name: Name of the component, should be unique per component class.\n unique_name: Unique name for every component class instance.\n driver: Driver class to handle pre-execution behaviors in a component.\n executor: Executor class to do the real execution work.\n input_dict: A [Text -> Channel] dict serving as the inputs to the component.\n exec_properties: A [Text -> Any] dict serving as additional properties\n needed for execution.\n outputs: Optional Channel destinations of the component.\n \"\"\"\n\n def __init__(self,\n component_name,\n driver,\n executor,\n input_dict,\n exec_properties,\n unique_name = '',\n outputs = ComponentOutputs({})):\n self.component_name = component_name\n self.driver = driver\n self.executor = executor\n self.input_dict = input_dict\n self.exec_properties = exec_properties\n self.unique_name = unique_name\n self.outputs = outputs or self._create_outputs()\n self._type_check(self.input_dict, self.exec_properties)\n\n def __str__(self):\n return \"\"\"\n{\n component_name: {component_name},\n unique_name: {unique_name},\n driver: {driver},\n executor: {executor},\n input_dict: {input_dict},\n outputs: {outputs},\n exec_properties: {exec_properties}\n}\n \"\"\".format( # pylint: disable=missing-format-argument-key\n component_name=self.component_name,\n unique_name=self.unique_name,\n driver=self.driver,\n executor=self.executor,\n input_dict=self.input_dict,\n outputs=self.outputs,\n exec_properties=self.exec_properties)\n\n def __repr__(self):\n return self.__str__()\n\n @abc.abstractmethod\n def _create_outputs(self):\n \"\"\"Creates outputs placeholder for components.\n\n Returns:\n ComponentOutputs object containing the dict of [Text -> Channel]\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def _type_check(self, input_dict,\n exec_properties):\n \"\"\"Does type checking for the inputs and exec_properties.\n\n Args:\n input_dict: A Dict[Text, Channel] as the inputs of the Component.\n exec_properties: A Dict[Text, Any] as the execution properties of the\n component.\n \"\"\"\n raise NotImplementedError\n", "path": "tfx/components/base/base_component.py"}]} | 2,043 | 158 |
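The root cause in the entry above is Python's `str.format` field syntax: a bare `{` opens a replacement field, so the template's leading `{\n    component_name` text is parsed as a field name and raises `KeyError`; literal braces have to be written as `{{` and `}}`. Below is a minimal, dependency-free sketch of that behaviour (plain Python, no TFX imports; the template strings are hypothetical stand-ins for the one in the issue):

```python
# Literal braces in str.format templates must be escaped by doubling them.
broken = "{\n  name: {name}\n}"     # the bare "{" starts a field named "\n  name"
fixed = "{{\n  name: {name}\n}}"    # "{{" / "}}" render as literal braces

try:
    broken.format(name="example_gen")
except KeyError as exc:             # KeyError: '\n  name', the same failure mode as the issue
    print("broken template raised", repr(exc))

# prints the literal braces on their own lines around "  name: example_gen"
print(fixed.format(name="example_gen"))
```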
gh_patches_debug_31131 | rasdani/github-patches | git_diff | saleor__saleor-2979 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Return user's geolocalization based on IP
API should return geolocalization data guessed from the user's IP. We already have all the logic in the `saleor.core.middleware.country` function. What needs to be done is to wrap this data in a GraphQL type and tie it up to the API.
E.g.:
```
shop {
geolocalization {
countryCode
}
}
```
Should it return DEFAULT_COUNTRY from settings as a fallback?
@mociepka Please provide more information about what data you'd need in the storefront.
</issue>
<code>
[start of saleor/graphql/shop/types.py]
1 import graphene
2 from django.conf import settings
3 from django_countries import countries
4 from graphql_jwt.decorators import permission_required
5 from phonenumbers import COUNTRY_CODE_TO_REGION_CODE
6
7 from ...core.permissions import get_permissions
8 from ...site import models as site_models
9 from ..core.types.common import (
10 CountryDisplay, LanguageDisplay, PermissionDisplay, WeightUnitsEnum)
11 from ..menu.types import Menu
12 from ..product.types import Collection
13 from ..utils import format_permissions_for_display
14
15
16 class Navigation(graphene.ObjectType):
17 main = graphene.Field(Menu, description='Main navigation bar.')
18 secondary = graphene.Field(Menu, description='Secondary navigation bar.')
19
20 class Meta:
21 description = 'Represents shop\'s navigation menus.'
22
23
24 class AuthorizationKey(graphene.ObjectType):
25 name = graphene.String(description='Name of the key.', required=True)
26 key = graphene.String(description='Value of the key.', required=True)
27
28
29 class Domain(graphene.ObjectType):
30 host = graphene.String(
31 description='The host name of the domain.', required=True)
32 ssl_enabled = graphene.Boolean(
33 description='Inform if SSL is enabled.', required=True)
34 url = graphene.String(
35 description='Shop\'s absolute URL.', required=True)
36
37 class Meta:
38 description = 'Represents shop\'s domain.'
39
40
41 class Shop(graphene.ObjectType):
42 authorization_keys = graphene.List(
43 AuthorizationKey, description='List of configured authorization keys.',
44 required=True)
45 countries = graphene.List(
46 CountryDisplay, description='List of countries available in the shop.',
47 required=True)
48 currencies = graphene.List(
49 graphene.String, description='List of available currencies.',
50 required=True)
51 default_currency = graphene.String(
52 description='Default shop\'s currency.', required=True)
53 default_country = graphene.Field(
54 CountryDisplay, description='Default shop\'s country')
55 description = graphene.String(description='Shop\'s description.')
56 domain = graphene.Field(
57 Domain, required=True, description='Shop\'s domain data.')
58 homepage_collection = graphene.Field(
59 Collection, description='Collection displayed on homepage')
60 languages = graphene.List(
61 LanguageDisplay,
62 description='List of the shops\'s supported languages.', required=True)
63 name = graphene.String(description='Shop\'s name.', required=True)
64 navigation = graphene.Field(
65 Navigation, description='Shop\'s navigation.')
66 permissions = graphene.List(
67 PermissionDisplay, description='List of available permissions.',
68 required=True)
69 phone_prefixes = graphene.List(
70 graphene.String, description='List of possible phone prefixes.',
71 required=True)
72 header_text = graphene.String(description='Header text')
73 include_taxes_in_prices = graphene.Boolean(
74 description='Include taxes in prices')
75 display_gross_prices = graphene.Boolean(
76 description='Display prices with tax in store')
77 track_inventory_by_default = graphene.Boolean(
78 description='Enable inventory tracking')
79 default_weight_unit = WeightUnitsEnum(description='Default weight unit')
80
81 class Meta:
82 description = '''
83 Represents a shop resource containing general shop\'s data
84 and configuration.'''
85
86 @permission_required('site.manage_settings')
87 def resolve_authorization_keys(self, info):
88 return site_models.AuthorizationKey.objects.all()
89
90 def resolve_countries(self, info):
91 return [
92 CountryDisplay(code=country[0], country=country[1])
93 for country in countries]
94
95 def resolve_currencies(self, info):
96 return settings.AVAILABLE_CURRENCIES
97
98 def resolve_domain(self, info):
99 site = info.context.site
100 return Domain(
101 host=site.domain,
102 ssl_enabled=settings.ENABLE_SSL,
103 url=info.context.build_absolute_uri('/'))
104
105 def resolve_default_currency(self, info):
106 return settings.DEFAULT_CURRENCY
107
108 def resolve_description(self, info):
109 return info.context.site.settings.description
110
111 def resolve_homepage_collection(self, info):
112 return info.context.site.settings.homepage_collection
113
114 def resolve_languages(self, info):
115 return [
116 LanguageDisplay(code=language[0], language=language[1])
117 for language in settings.LANGUAGES]
118
119 def resolve_name(self, info):
120 return info.context.site.name
121
122 def resolve_navigation(self, info):
123 site_settings = info.context.site.settings
124 return Navigation(
125 main=site_settings.top_menu, secondary=site_settings.bottom_menu)
126
127 @permission_required('site.manage_settings')
128 def resolve_permissions(self, info):
129 permissions = get_permissions()
130 return format_permissions_for_display(permissions)
131
132 def resolve_phone_prefixes(self, info):
133 return list(COUNTRY_CODE_TO_REGION_CODE.keys())
134
135 def resolve_header_text(self, info):
136 return info.context.site.settings.header_text
137
138 def resolve_include_taxes_in_prices(self, info):
139 return info.context.site.settings.include_taxes_in_prices
140
141 def resolve_display_gross_prices(self, info):
142 return info.context.site.settings.display_gross_prices
143
144 def resolve_track_inventory_by_default(self, info):
145 return info.context.site.settings.track_inventory_by_default
146
147 def resolve_default_weight_unit(self, info):
148 return info.context.site.settings.default_weight_unit
149
150 def resolve_default_country(self, info):
151 default_country_code = settings.DEFAULT_COUNTRY
152 default_country_name = countries.countries.get(default_country_code)
153 if default_country_name:
154 default_country = CountryDisplay(
155 code=default_country_code, country=default_country_name)
156 else:
157 default_country = None
158 return default_country
159
[end of saleor/graphql/shop/types.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/saleor/graphql/shop/types.py b/saleor/graphql/shop/types.py
--- a/saleor/graphql/shop/types.py
+++ b/saleor/graphql/shop/types.py
@@ -5,6 +5,7 @@
from phonenumbers import COUNTRY_CODE_TO_REGION_CODE
from ...core.permissions import get_permissions
+from ...core.utils import get_client_ip, get_country_by_ip
from ...site import models as site_models
from ..core.types.common import (
CountryDisplay, LanguageDisplay, PermissionDisplay, WeightUnitsEnum)
@@ -38,7 +39,19 @@
description = 'Represents shop\'s domain.'
+class Geolocalization(graphene.ObjectType):
+ country = graphene.Field(
+ CountryDisplay,
+ description='Country of the user acquired by his IP address.')
+
+ class Meta:
+ description = 'Represents customers\'s geolocalization data.'
+
+
class Shop(graphene.ObjectType):
+ geolocalization = graphene.Field(
+ Geolocalization,
+ description='Customer\'s geolocalization data.')
authorization_keys = graphene.List(
AuthorizationKey, description='List of configured authorization keys.',
required=True)
@@ -102,6 +115,15 @@
ssl_enabled=settings.ENABLE_SSL,
url=info.context.build_absolute_uri('/'))
+ def resolve_geolocalization(self, info):
+ client_ip = get_client_ip(info.context)
+ country = get_country_by_ip(client_ip)
+ if country:
+ return Geolocalization(
+ country=CountryDisplay(
+ code=country.code, country=country.name))
+ return Geolocalization(country=None)
+
def resolve_default_currency(self, info):
return settings.DEFAULT_CURRENCY
| {"golden_diff": "diff --git a/saleor/graphql/shop/types.py b/saleor/graphql/shop/types.py\n--- a/saleor/graphql/shop/types.py\n+++ b/saleor/graphql/shop/types.py\n@@ -5,6 +5,7 @@\n from phonenumbers import COUNTRY_CODE_TO_REGION_CODE\n \n from ...core.permissions import get_permissions\n+from ...core.utils import get_client_ip, get_country_by_ip\n from ...site import models as site_models\n from ..core.types.common import (\n CountryDisplay, LanguageDisplay, PermissionDisplay, WeightUnitsEnum)\n@@ -38,7 +39,19 @@\n description = 'Represents shop\\'s domain.'\n \n \n+class Geolocalization(graphene.ObjectType):\n+ country = graphene.Field(\n+ CountryDisplay,\n+ description='Country of the user acquired by his IP address.')\n+\n+ class Meta:\n+ description = 'Represents customers\\'s geolocalization data.'\n+\n+\n class Shop(graphene.ObjectType):\n+ geolocalization = graphene.Field(\n+ Geolocalization,\n+ description='Customer\\'s geolocalization data.')\n authorization_keys = graphene.List(\n AuthorizationKey, description='List of configured authorization keys.',\n required=True)\n@@ -102,6 +115,15 @@\n ssl_enabled=settings.ENABLE_SSL,\n url=info.context.build_absolute_uri('/'))\n \n+ def resolve_geolocalization(self, info):\n+ client_ip = get_client_ip(info.context)\n+ country = get_country_by_ip(client_ip)\n+ if country:\n+ return Geolocalization(\n+ country=CountryDisplay(\n+ code=country.code, country=country.name))\n+ return Geolocalization(country=None)\n+\n def resolve_default_currency(self, info):\n return settings.DEFAULT_CURRENCY\n", "issue": "Return user's geolocalization based on IP\nAPI should return geolocalization data guessed from users's IP. We already have all the logic in the `saleor.core.middleware.country` function. What needs to be done is to wrap this data in a GraphQL type and tie it up to the API. 
\r\n\r\nE.g.:\r\n```\r\nshop {\r\n geolocalization {\r\n countryCode\r\n }\r\n}\r\n```\r\n\r\nShould it return DEFAULT_COUNTRY from settings as a fallback?\r\n\r\n@mociepka Please provide more information about what data you'd need in the storefront.\n", "before_files": [{"content": "import graphene\nfrom django.conf import settings\nfrom django_countries import countries\nfrom graphql_jwt.decorators import permission_required\nfrom phonenumbers import COUNTRY_CODE_TO_REGION_CODE\n\nfrom ...core.permissions import get_permissions\nfrom ...site import models as site_models\nfrom ..core.types.common import (\n CountryDisplay, LanguageDisplay, PermissionDisplay, WeightUnitsEnum)\nfrom ..menu.types import Menu\nfrom ..product.types import Collection\nfrom ..utils import format_permissions_for_display\n\n\nclass Navigation(graphene.ObjectType):\n main = graphene.Field(Menu, description='Main navigation bar.')\n secondary = graphene.Field(Menu, description='Secondary navigation bar.')\n\n class Meta:\n description = 'Represents shop\\'s navigation menus.'\n\n\nclass AuthorizationKey(graphene.ObjectType):\n name = graphene.String(description='Name of the key.', required=True)\n key = graphene.String(description='Value of the key.', required=True)\n\n\nclass Domain(graphene.ObjectType):\n host = graphene.String(\n description='The host name of the domain.', required=True)\n ssl_enabled = graphene.Boolean(\n description='Inform if SSL is enabled.', required=True)\n url = graphene.String(\n description='Shop\\'s absolute URL.', required=True)\n\n class Meta:\n description = 'Represents shop\\'s domain.'\n\n\nclass Shop(graphene.ObjectType):\n authorization_keys = graphene.List(\n AuthorizationKey, description='List of configured authorization keys.',\n required=True)\n countries = graphene.List(\n CountryDisplay, description='List of countries available in the shop.',\n required=True)\n currencies = graphene.List(\n graphene.String, description='List of available currencies.',\n required=True)\n default_currency = graphene.String(\n description='Default shop\\'s currency.', required=True)\n default_country = graphene.Field(\n CountryDisplay, description='Default shop\\'s country')\n description = graphene.String(description='Shop\\'s description.')\n domain = graphene.Field(\n Domain, required=True, description='Shop\\'s domain data.')\n homepage_collection = graphene.Field(\n Collection, description='Collection displayed on homepage')\n languages = graphene.List(\n LanguageDisplay,\n description='List of the shops\\'s supported languages.', required=True)\n name = graphene.String(description='Shop\\'s name.', required=True)\n navigation = graphene.Field(\n Navigation, description='Shop\\'s navigation.')\n permissions = graphene.List(\n PermissionDisplay, description='List of available permissions.',\n required=True)\n phone_prefixes = graphene.List(\n graphene.String, description='List of possible phone prefixes.',\n required=True)\n header_text = graphene.String(description='Header text')\n include_taxes_in_prices = graphene.Boolean(\n description='Include taxes in prices')\n display_gross_prices = graphene.Boolean(\n description='Display prices with tax in store')\n track_inventory_by_default = graphene.Boolean(\n description='Enable inventory tracking')\n default_weight_unit = WeightUnitsEnum(description='Default weight unit')\n\n class Meta:\n description = '''\n Represents a shop resource containing general shop\\'s data\n and configuration.'''\n\n 
@permission_required('site.manage_settings')\n def resolve_authorization_keys(self, info):\n return site_models.AuthorizationKey.objects.all()\n\n def resolve_countries(self, info):\n return [\n CountryDisplay(code=country[0], country=country[1])\n for country in countries]\n\n def resolve_currencies(self, info):\n return settings.AVAILABLE_CURRENCIES\n\n def resolve_domain(self, info):\n site = info.context.site\n return Domain(\n host=site.domain,\n ssl_enabled=settings.ENABLE_SSL,\n url=info.context.build_absolute_uri('/'))\n\n def resolve_default_currency(self, info):\n return settings.DEFAULT_CURRENCY\n\n def resolve_description(self, info):\n return info.context.site.settings.description\n\n def resolve_homepage_collection(self, info):\n return info.context.site.settings.homepage_collection\n\n def resolve_languages(self, info):\n return [\n LanguageDisplay(code=language[0], language=language[1])\n for language in settings.LANGUAGES]\n\n def resolve_name(self, info):\n return info.context.site.name\n\n def resolve_navigation(self, info):\n site_settings = info.context.site.settings\n return Navigation(\n main=site_settings.top_menu, secondary=site_settings.bottom_menu)\n\n @permission_required('site.manage_settings')\n def resolve_permissions(self, info):\n permissions = get_permissions()\n return format_permissions_for_display(permissions)\n\n def resolve_phone_prefixes(self, info):\n return list(COUNTRY_CODE_TO_REGION_CODE.keys())\n\n def resolve_header_text(self, info):\n return info.context.site.settings.header_text\n\n def resolve_include_taxes_in_prices(self, info):\n return info.context.site.settings.include_taxes_in_prices\n\n def resolve_display_gross_prices(self, info):\n return info.context.site.settings.display_gross_prices\n\n def resolve_track_inventory_by_default(self, info):\n return info.context.site.settings.track_inventory_by_default\n\n def resolve_default_weight_unit(self, info):\n return info.context.site.settings.default_weight_unit\n\n def resolve_default_country(self, info):\n default_country_code = settings.DEFAULT_COUNTRY\n default_country_name = countries.countries.get(default_country_code)\n if default_country_name:\n default_country = CountryDisplay(\n code=default_country_code, country=default_country_name)\n else:\n default_country = None\n return default_country\n", "path": "saleor/graphql/shop/types.py"}]} | 2,170 | 388 |
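The diff above only wires two existing helpers, `get_client_ip` and `get_country_by_ip`, into a new `Geolocalization` type; how the country lookup itself works is not shown in the entry. The sketch below is one plausible shape for such a helper, assuming Django's bundled `GeoIP2` wrapper, a configured MaxMind database, and the `django_countries` `Country` object; the real `saleor.core.utils.get_country_by_ip` may be implemented differently:

```python
# Hypothetical country-by-IP helper; the GeoIP2 and Country usage is an
# assumption, not taken from the saleor sources quoted in this entry.
from django.contrib.gis.geoip2 import GeoIP2
from django_countries.fields import Country


def get_country_by_ip(ip_address):
    try:
        # e.g. {'country_code': 'PL', 'country_name': 'Poland'}
        geo_data = GeoIP2().country(ip_address)
    except Exception:  # private/unroutable addresses, missing GeoIP database, ...
        return None
    code = geo_data.get("country_code")
    # Country exposes .code and .name, which is what the resolver in the diff expects
    return Country(code) if code else None
```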
gh_patches_debug_37626 | rasdani/github-patches | git_diff | pre-commit__pre-commit-2774 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pre-commit can delete/revert unstaged files if error occurs during git diff-index
### search you tried in the issue tracker
diff-index
### describe your issue
I performed a git commit with some modifications unstaged. After the commit, most of the modifications had been reverted and my work was lost. The diff saved in the patch directory had only a few of the modifications in it - the ones that survived. The rest were gone.
To reproduce:
- Modify four files and stage one with `git add`
- Use `git status` to determine the order of the three unstaged files.
- Change the permission on the middle one so that git will not be able to read it
- Now do `git commit`: the changes to the first unstaged file will be preserved but the other two will be lost.
The key point, I think, is that the code in `staged_files_only.py` checks that the return code when creating the diff is non-zero, which it takes to mean that the code is `1`, i.e. that there were diffs. However, in this case the return code is `128`, which *is* non-zero but does _not_ mean success - it means error. So the code assumes the diff is OK even though it is incomplete.
### pre-commit --version
2.17.0
### .pre-commit-config.yaml
```yaml
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: 56b4a7e506901ff86f8de5c2551bc41f8eacf717
hooks:
- id: check-yaml
# - id: end-of-file-fixer
- id: trailing-whitespace
- repo: https://github.com/psf/black
rev: 21.11b0
hooks:
- id: black
language_version: python3.6
- repo: https://github.com/PyCQA/isort
rev: 5.10.1
hooks:
- id: isort
args: ["--profile", "black", "--filter-files"]
```
### ~/.cache/pre-commit/pre-commit.log (if present)
_No response_
</issue>
<code>
[start of pre_commit/staged_files_only.py]
1 from __future__ import annotations
2
3 import contextlib
4 import logging
5 import os.path
6 import time
7 from typing import Generator
8
9 from pre_commit import git
10 from pre_commit.util import CalledProcessError
11 from pre_commit.util import cmd_output
12 from pre_commit.util import cmd_output_b
13 from pre_commit.xargs import xargs
14
15
16 logger = logging.getLogger('pre_commit')
17
18 # without forcing submodule.recurse=0, changes in nested submodules will be
19 # discarded if `submodule.recurse=1` is configured
20 # we choose this instead of `--no-recurse-submodules` because it works on
21 # versions of git before that option was added to `git checkout`
22 _CHECKOUT_CMD = ('git', '-c', 'submodule.recurse=0', 'checkout', '--', '.')
23
24
25 def _git_apply(patch: str) -> None:
26 args = ('apply', '--whitespace=nowarn', patch)
27 try:
28 cmd_output_b('git', *args)
29 except CalledProcessError:
30 # Retry with autocrlf=false -- see #570
31 cmd_output_b('git', '-c', 'core.autocrlf=false', *args)
32
33
34 @contextlib.contextmanager
35 def _intent_to_add_cleared() -> Generator[None, None, None]:
36 intent_to_add = git.intent_to_add_files()
37 if intent_to_add:
38 logger.warning('Unstaged intent-to-add files detected.')
39
40 xargs(('git', 'rm', '--cached', '--'), intent_to_add)
41 try:
42 yield
43 finally:
44 xargs(('git', 'add', '--intent-to-add', '--'), intent_to_add)
45 else:
46 yield
47
48
49 @contextlib.contextmanager
50 def _unstaged_changes_cleared(patch_dir: str) -> Generator[None, None, None]:
51 tree = cmd_output('git', 'write-tree')[1].strip()
52 retcode, diff_stdout_binary, _ = cmd_output_b(
53 'git', 'diff-index', '--ignore-submodules', '--binary',
54 '--exit-code', '--no-color', '--no-ext-diff', tree, '--',
55 check=False,
56 )
57 if retcode and diff_stdout_binary.strip():
58 patch_filename = f'patch{int(time.time())}-{os.getpid()}'
59 patch_filename = os.path.join(patch_dir, patch_filename)
60 logger.warning('Unstaged files detected.')
61 logger.info(f'Stashing unstaged files to {patch_filename}.')
62 # Save the current unstaged changes as a patch
63 os.makedirs(patch_dir, exist_ok=True)
64 with open(patch_filename, 'wb') as patch_file:
65 patch_file.write(diff_stdout_binary)
66
67 # prevent recursive post-checkout hooks (#1418)
68 no_checkout_env = dict(os.environ, _PRE_COMMIT_SKIP_POST_CHECKOUT='1')
69
70 try:
71 cmd_output_b(*_CHECKOUT_CMD, env=no_checkout_env)
72 yield
73 finally:
74 # Try to apply the patch we saved
75 try:
76 _git_apply(patch_filename)
77 except CalledProcessError:
78 logger.warning(
79 'Stashed changes conflicted with hook auto-fixes... '
80 'Rolling back fixes...',
81 )
82 # We failed to apply the patch, presumably due to fixes made
83 # by hooks.
84 # Roll back the changes made by hooks.
85 cmd_output_b(*_CHECKOUT_CMD, env=no_checkout_env)
86 _git_apply(patch_filename)
87
88 logger.info(f'Restored changes from {patch_filename}.')
89 else:
90 # There weren't any staged files so we don't need to do anything
91 # special
92 yield
93
94
95 @contextlib.contextmanager
96 def staged_files_only(patch_dir: str) -> Generator[None, None, None]:
97 """Clear any unstaged changes from the git working directory inside this
98 context.
99 """
100 with _intent_to_add_cleared(), _unstaged_changes_cleared(patch_dir):
101 yield
102
[end of pre_commit/staged_files_only.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pre_commit/staged_files_only.py b/pre_commit/staged_files_only.py
--- a/pre_commit/staged_files_only.py
+++ b/pre_commit/staged_files_only.py
@@ -7,6 +7,7 @@
from typing import Generator
from pre_commit import git
+from pre_commit.errors import FatalError
from pre_commit.util import CalledProcessError
from pre_commit.util import cmd_output
from pre_commit.util import cmd_output_b
@@ -49,12 +50,16 @@
@contextlib.contextmanager
def _unstaged_changes_cleared(patch_dir: str) -> Generator[None, None, None]:
tree = cmd_output('git', 'write-tree')[1].strip()
- retcode, diff_stdout_binary, _ = cmd_output_b(
+ diff_cmd = (
'git', 'diff-index', '--ignore-submodules', '--binary',
'--exit-code', '--no-color', '--no-ext-diff', tree, '--',
- check=False,
)
- if retcode and diff_stdout_binary.strip():
+ retcode, diff_stdout, diff_stderr = cmd_output_b(*diff_cmd, check=False)
+ if retcode == 0:
+ # There weren't any staged files so we don't need to do anything
+ # special
+ yield
+ elif retcode == 1 and diff_stdout.strip():
patch_filename = f'patch{int(time.time())}-{os.getpid()}'
patch_filename = os.path.join(patch_dir, patch_filename)
logger.warning('Unstaged files detected.')
@@ -62,7 +67,7 @@
# Save the current unstaged changes as a patch
os.makedirs(patch_dir, exist_ok=True)
with open(patch_filename, 'wb') as patch_file:
- patch_file.write(diff_stdout_binary)
+ patch_file.write(diff_stdout)
# prevent recursive post-checkout hooks (#1418)
no_checkout_env = dict(os.environ, _PRE_COMMIT_SKIP_POST_CHECKOUT='1')
@@ -86,10 +91,12 @@
_git_apply(patch_filename)
logger.info(f'Restored changes from {patch_filename}.')
- else:
- # There weren't any staged files so we don't need to do anything
- # special
- yield
+ else: # pragma: win32 no cover
+ # some error occurred while requesting the diff
+ e = CalledProcessError(retcode, diff_cmd, b'', diff_stderr)
+ raise FatalError(
+ f'pre-commit failed to diff -- perhaps due to permissions?\n\n{e}',
+ )
@contextlib.contextmanager
| {"golden_diff": "diff --git a/pre_commit/staged_files_only.py b/pre_commit/staged_files_only.py\n--- a/pre_commit/staged_files_only.py\n+++ b/pre_commit/staged_files_only.py\n@@ -7,6 +7,7 @@\n from typing import Generator\n \n from pre_commit import git\n+from pre_commit.errors import FatalError\n from pre_commit.util import CalledProcessError\n from pre_commit.util import cmd_output\n from pre_commit.util import cmd_output_b\n@@ -49,12 +50,16 @@\n @contextlib.contextmanager\n def _unstaged_changes_cleared(patch_dir: str) -> Generator[None, None, None]:\n tree = cmd_output('git', 'write-tree')[1].strip()\n- retcode, diff_stdout_binary, _ = cmd_output_b(\n+ diff_cmd = (\n 'git', 'diff-index', '--ignore-submodules', '--binary',\n '--exit-code', '--no-color', '--no-ext-diff', tree, '--',\n- check=False,\n )\n- if retcode and diff_stdout_binary.strip():\n+ retcode, diff_stdout, diff_stderr = cmd_output_b(*diff_cmd, check=False)\n+ if retcode == 0:\n+ # There weren't any staged files so we don't need to do anything\n+ # special\n+ yield\n+ elif retcode == 1 and diff_stdout.strip():\n patch_filename = f'patch{int(time.time())}-{os.getpid()}'\n patch_filename = os.path.join(patch_dir, patch_filename)\n logger.warning('Unstaged files detected.')\n@@ -62,7 +67,7 @@\n # Save the current unstaged changes as a patch\n os.makedirs(patch_dir, exist_ok=True)\n with open(patch_filename, 'wb') as patch_file:\n- patch_file.write(diff_stdout_binary)\n+ patch_file.write(diff_stdout)\n \n # prevent recursive post-checkout hooks (#1418)\n no_checkout_env = dict(os.environ, _PRE_COMMIT_SKIP_POST_CHECKOUT='1')\n@@ -86,10 +91,12 @@\n _git_apply(patch_filename)\n \n logger.info(f'Restored changes from {patch_filename}.')\n- else:\n- # There weren't any staged files so we don't need to do anything\n- # special\n- yield\n+ else: # pragma: win32 no cover\n+ # some error occurred while requesting the diff\n+ e = CalledProcessError(retcode, diff_cmd, b'', diff_stderr)\n+ raise FatalError(\n+ f'pre-commit failed to diff -- perhaps due to permissions?\\n\\n{e}',\n+ )\n \n \n @contextlib.contextmanager\n", "issue": "pre-commit can delete/revert unstaged files if error occurs during git diff-index\n### search you tried in the issue tracker\n\ndiff-index\n\n### describe your issue\n\nI performed a git commit with some modifications unstaged. After the commit, most of the modifications had been reverted and my work was lost. The diff saved in the patch directory had only a few of the modifications in - the ones that survived. The rest were gone.\r\n\r\nTo reproduce:\r\n- Modify four files and stage one with `git add`\r\n- Use `git status` to determine the order of the three unstaged files.\r\n- Change the permission on the middle one so that git will not be able to read it\r\n- Now do `git commit`: the changes to the first unstaged file will be preserved but the other two will be lost.\r\n\r\nThe key point, I think, is that the code in `staged_files_only.py` checks that the return code when creating the diff is non-zero which it takes to mean that the code is `1` meaning that there were diffs. However, in this case the return code is `128` which *is* non-zero but does _not_ mean success - it means error. 
So the code assumes the diff is OK even though it is incomplete.\n\n### pre-commit --version\n\n2.17.0\n\n### .pre-commit-config.yaml\n\n```yaml\nrepos:\r\n- repo: https://github.com/pre-commit/pre-commit-hooks\r\n rev: 56b4a7e506901ff86f8de5c2551bc41f8eacf717\r\n hooks:\r\n - id: check-yaml\r\n# - id: end-of-file-fixer\r\n - id: trailing-whitespace\r\n- repo: https://github.com/psf/black\r\n rev: 21.11b0\r\n hooks:\r\n - id: black\r\n language_version: python3.6\r\n- repo: https://github.com/PyCQA/isort\r\n rev: 5.10.1\r\n hooks:\r\n - id: isort\r\n args: [\"--profile\", \"black\", \"--filter-files\"]\n```\n\n\n### ~/.cache/pre-commit/pre-commit.log (if present)\n\n_No response_\n", "before_files": [{"content": "from __future__ import annotations\n\nimport contextlib\nimport logging\nimport os.path\nimport time\nfrom typing import Generator\n\nfrom pre_commit import git\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import cmd_output_b\nfrom pre_commit.xargs import xargs\n\n\nlogger = logging.getLogger('pre_commit')\n\n# without forcing submodule.recurse=0, changes in nested submodules will be\n# discarded if `submodule.recurse=1` is configured\n# we choose this instead of `--no-recurse-submodules` because it works on\n# versions of git before that option was added to `git checkout`\n_CHECKOUT_CMD = ('git', '-c', 'submodule.recurse=0', 'checkout', '--', '.')\n\n\ndef _git_apply(patch: str) -> None:\n args = ('apply', '--whitespace=nowarn', patch)\n try:\n cmd_output_b('git', *args)\n except CalledProcessError:\n # Retry with autocrlf=false -- see #570\n cmd_output_b('git', '-c', 'core.autocrlf=false', *args)\n\n\[email protected]\ndef _intent_to_add_cleared() -> Generator[None, None, None]:\n intent_to_add = git.intent_to_add_files()\n if intent_to_add:\n logger.warning('Unstaged intent-to-add files detected.')\n\n xargs(('git', 'rm', '--cached', '--'), intent_to_add)\n try:\n yield\n finally:\n xargs(('git', 'add', '--intent-to-add', '--'), intent_to_add)\n else:\n yield\n\n\[email protected]\ndef _unstaged_changes_cleared(patch_dir: str) -> Generator[None, None, None]:\n tree = cmd_output('git', 'write-tree')[1].strip()\n retcode, diff_stdout_binary, _ = cmd_output_b(\n 'git', 'diff-index', '--ignore-submodules', '--binary',\n '--exit-code', '--no-color', '--no-ext-diff', tree, '--',\n check=False,\n )\n if retcode and diff_stdout_binary.strip():\n patch_filename = f'patch{int(time.time())}-{os.getpid()}'\n patch_filename = os.path.join(patch_dir, patch_filename)\n logger.warning('Unstaged files detected.')\n logger.info(f'Stashing unstaged files to {patch_filename}.')\n # Save the current unstaged changes as a patch\n os.makedirs(patch_dir, exist_ok=True)\n with open(patch_filename, 'wb') as patch_file:\n patch_file.write(diff_stdout_binary)\n\n # prevent recursive post-checkout hooks (#1418)\n no_checkout_env = dict(os.environ, _PRE_COMMIT_SKIP_POST_CHECKOUT='1')\n\n try:\n cmd_output_b(*_CHECKOUT_CMD, env=no_checkout_env)\n yield\n finally:\n # Try to apply the patch we saved\n try:\n _git_apply(patch_filename)\n except CalledProcessError:\n logger.warning(\n 'Stashed changes conflicted with hook auto-fixes... 
'\n 'Rolling back fixes...',\n )\n # We failed to apply the patch, presumably due to fixes made\n # by hooks.\n # Roll back the changes made by hooks.\n cmd_output_b(*_CHECKOUT_CMD, env=no_checkout_env)\n _git_apply(patch_filename)\n\n logger.info(f'Restored changes from {patch_filename}.')\n else:\n # There weren't any staged files so we don't need to do anything\n # special\n yield\n\n\[email protected]\ndef staged_files_only(patch_dir: str) -> Generator[None, None, None]:\n \"\"\"Clear any unstaged changes from the git working directory inside this\n context.\n \"\"\"\n with _intent_to_add_cleared(), _unstaged_changes_cleared(patch_dir):\n yield\n", "path": "pre_commit/staged_files_only.py"}]} | 2,072 | 582 |
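The essence of the fix above is that `git diff-index --exit-code` is three-valued: `0` means no unstaged changes, `1` means a diff was written to stdout, and anything else (such as `128` for an unreadable file) means git itself failed, so the partial output must not be treated as a complete stash. A standalone sketch of that check, using only the standard library instead of pre-commit's own `cmd_output_b` helper:

```python
import subprocess


def unstaged_diff(tree: str) -> bytes:
    """Return the binary diff of the working tree against `tree`.

    Exit code 0 -> no unstaged changes; 1 -> diff written to stdout;
    anything else (e.g. 128) -> git itself failed, so raise instead of
    silently treating a partial diff as the full stash.
    """
    proc = subprocess.run(
        ['git', 'diff-index', '--ignore-submodules', '--binary',
         '--exit-code', '--no-color', '--no-ext-diff', tree, '--'],
        capture_output=True,
    )
    if proc.returncode == 0:
        return b''
    if proc.returncode == 1:
        return proc.stdout
    raise RuntimeError(
        f'git diff-index failed ({proc.returncode}): '
        f'{proc.stderr.decode(errors="replace")}'
    )
```

Raising on the unexpected codes makes a permissions problem abort the commit instead of silently stashing, and later restoring, an incomplete diff.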
gh_patches_debug_41294 | rasdani/github-patches | git_diff | conan-io__conan-center-index-3119 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[request] xz_utils/5.2.5
### Package Details
* Package Name/Version: **xz_utils/5.2.5**
* Changelog: **https://git.tukaani.org/?p=xz.git;a=blob;f=NEWS;hb=HEAD**
The above-mentioned version has been newly released by the upstream project and is not yet available as a recipe. Please add this version.
</issue>
<code>
[start of recipes/xz_utils/all/conanfile.py]
1 import os
2 from conans import ConanFile, tools, AutoToolsBuildEnvironment, MSBuild
3 from conans.tools import Version
4
5
6 class XZUtils(ConanFile):
7 name = "xz_utils"
8 description = "XZ Utils is free general-purpose data compression software with a high compression ratio. XZ Utils were written" \
9 " for POSIX-like systems, but also work on some not-so-POSIX systems. XZ Utils are the successor to LZMA Utils."
10 url = "https://github.com/conan-io/conan-center-index"
11 homepage = "https://tukaani.org/xz"
12 license = "Public Domain, GNU LGPLv2.1, GNU GPLv2, or GNU GPLv3"
13
14 settings = "os", "arch", "compiler", "build_type"
15 options = {"shared": [True, False], "fPIC": [True, False]}
16 default_options = {"shared": False, "fPIC": True}
17
18 @property
19 def _source_subfolder(self):
20 return "source_subfolder"
21
22 @property
23 def _use_winbash(self):
24 return tools.os_info.is_windows
25
26 def build_requirements(self):
27 if self._use_winbash and "CONAN_BASH_PATH" not in os.environ and \
28 tools.os_info.detect_windows_subsystem() != "msys2":
29 self.build_requires("msys2/20190524")
30
31 def _effective_msbuild_type(self):
32 # treat "RelWithDebInfo" and "MinSizeRel" as "Release"
33 return "Debug" if self.settings.build_type == "Debug" else "Release"
34
35 def config_options(self):
36 if self.settings.os == "Windows":
37 del self.options.fPIC
38
39 def configure(self):
40 del self.settings.compiler.cppstd
41 del self.settings.compiler.libcxx
42
43 def _apply_patches(self):
44 # Relax Windows SDK restriction
45 tools.replace_in_file(os.path.join(self._source_subfolder, "windows", "vs2017", "liblzma.vcxproj"),
46 "<WindowsTargetPlatformVersion>10.0.15063.0</WindowsTargetPlatformVersion>",
47 "<WindowsTargetPlatformVersion>10.0</WindowsTargetPlatformVersion>")
48
49 tools.replace_in_file(os.path.join(self._source_subfolder, "windows", "vs2017", "liblzma_dll.vcxproj"),
50 "<WindowsTargetPlatformVersion>10.0.15063.0</WindowsTargetPlatformVersion>",
51 "<WindowsTargetPlatformVersion>10.0</WindowsTargetPlatformVersion>")
52
53 def source(self):
54 tools.get(**self.conan_data["sources"][self.version])
55 os.rename("xz-" + self.version, self._source_subfolder)
56 self._apply_patches()
57
58 def _build_msvc(self):
59 # windows\INSTALL-MSVC.txt
60
61 if self.settings.compiler.version == 15:
62 # emulate VS2019+ meaning of WindowsTargetPlatformVersion == "10.0"
63 # undocumented method, but officially recommended workaround by microsoft at at
64 # https://developercommunity.visualstudio.com/content/problem/140294/windowstargetplatformversion-makes-it-impossible-t.html
65 tools.replace_in_file(os.path.join(self._source_subfolder, "windows", "vs2017", "liblzma.vcxproj"),
66 "<WindowsTargetPlatformVersion>10.0</WindowsTargetPlatformVersion>",
67 "<WindowsTargetPlatformVersion>$([Microsoft.Build.Utilities.ToolLocationHelper]::GetLatestSDKTargetPlatformVersion('Windows', '10.0'))</WindowsTargetPlatformVersion>")
68
69 tools.replace_in_file(os.path.join(self._source_subfolder, "windows", "vs2017", "liblzma_dll.vcxproj"),
70 "<WindowsTargetPlatformVersion>10.0</WindowsTargetPlatformVersion>",
71 "<WindowsTargetPlatformVersion>$([Microsoft.Build.Utilities.ToolLocationHelper]::GetLatestSDKTargetPlatformVersion('Windows', '10.0'))</WindowsTargetPlatformVersion>")
72
73 msvc_version = "vs2017" if Version(self.settings.compiler.version) >= "15" else "vs2013"
74 with tools.chdir(os.path.join(self._source_subfolder, "windows", msvc_version)):
75 target = "liblzma_dll" if self.options.shared else "liblzma"
76 msbuild = MSBuild(self)
77 msbuild.build(
78 "xz_win.sln",
79 targets=[target],
80 build_type=self._effective_msbuild_type(),
81 platforms={"x86": "Win32", "x86_64": "x64"},
82 use_env=False,
83 upgrade_project=False)
84
85 def _build_configure(self):
86 with tools.chdir(self._source_subfolder):
87 args = []
88 env_build = AutoToolsBuildEnvironment(self, win_bash=self._use_winbash)
89 args = ["--disable-doc"]
90 if self.settings.os != "Windows" and self.options.fPIC:
91 args.append("--with-pic")
92 if self.options.shared:
93 args.extend(["--disable-static", "--enable-shared"])
94 else:
95 args.extend(["--enable-static", "--disable-shared"])
96 if self.settings.build_type == "Debug":
97 args.append("--enable-debug")
98 env_build.configure(args=args, build=False)
99 env_build.make()
100 env_build.install()
101
102 def build(self):
103 if self.settings.compiler == "Visual Studio":
104 self._build_msvc()
105 else:
106 self._build_configure()
107
108 def package(self):
109 self.copy(pattern="COPYING", dst="licenses", src=self._source_subfolder)
110 if self.settings.compiler == "Visual Studio":
111 inc_dir = os.path.join(self._source_subfolder, "src", "liblzma", "api")
112 self.copy(pattern="*.h", dst="include", src=inc_dir, keep_path=True)
113 arch = {"x86": "Win32", "x86_64": "x64"}.get(str(self.settings.arch))
114 target = "liblzma_dll" if self.options.shared else "liblzma"
115 msvc_version = "vs2017" if Version(self.settings.compiler.version) >= "15" else "vs2013"
116 bin_dir = os.path.join(self._source_subfolder, "windows", msvc_version,
117 str(self._effective_msbuild_type()), arch, target)
118 self.copy(pattern="*.lib", dst="lib", src=bin_dir, keep_path=False)
119 if self.options.shared:
120 self.copy(pattern="*.dll", dst="bin", src=bin_dir, keep_path=False)
121 os.rename(os.path.join(self.package_folder, "lib", "liblzma.lib"),
122 os.path.join(self.package_folder, "lib", "lzma.lib"))
123
124 # Remove/rename forbidden files/folders in central repository
125 tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
126 tools.rmdir(os.path.join(self.package_folder, "share"))
127 try:
128 os.remove(os.path.join(self.package_folder, "lib", "liblzma.la"))
129 except:
130 pass
131
132 def package_info(self):
133 if not self.options.shared:
134 self.cpp_info.defines.append("LZMA_API_STATIC")
135 if self.settings.os == "Linux":
136 self.cpp_info.system_libs.append("pthread")
137 self.cpp_info.libs = tools.collect_libs(self)
138 self.cpp_info.names["pkg_config"] = "liblzma"
139 self.cpp_info.names["cmake_find_package"] = "LibLZMA"
140 self.cpp_info.names["cmake_find_package_multi"] = "LibLZMA"
141
[end of recipes/xz_utils/all/conanfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/recipes/xz_utils/all/conanfile.py b/recipes/xz_utils/all/conanfile.py
--- a/recipes/xz_utils/all/conanfile.py
+++ b/recipes/xz_utils/all/conanfile.py
@@ -9,6 +9,7 @@
" for POSIX-like systems, but also work on some not-so-POSIX systems. XZ Utils are the successor to LZMA Utils."
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://tukaani.org/xz"
+ topics = ("conan", "lzma", "xz", "compression")
license = "Public Domain, GNU LGPLv2.1, GNU GPLv2, or GNU GPLv3"
settings = "os", "arch", "compiler", "build_type"
@@ -26,7 +27,7 @@
def build_requirements(self):
if self._use_winbash and "CONAN_BASH_PATH" not in os.environ and \
tools.os_info.detect_windows_subsystem() != "msys2":
- self.build_requires("msys2/20190524")
+ self.build_requires("msys2/20200517")
def _effective_msbuild_type(self):
# treat "RelWithDebInfo" and "MinSizeRel" as "Release"
@@ -41,14 +42,15 @@
del self.settings.compiler.libcxx
def _apply_patches(self):
- # Relax Windows SDK restriction
- tools.replace_in_file(os.path.join(self._source_subfolder, "windows", "vs2017", "liblzma.vcxproj"),
- "<WindowsTargetPlatformVersion>10.0.15063.0</WindowsTargetPlatformVersion>",
- "<WindowsTargetPlatformVersion>10.0</WindowsTargetPlatformVersion>")
+ if tools.Version(self.version) == "5.2.4":
+ # Relax Windows SDK restriction
+ tools.replace_in_file(os.path.join(self._source_subfolder, "windows", "vs2017", "liblzma.vcxproj"),
+ "<WindowsTargetPlatformVersion>10.0.15063.0</WindowsTargetPlatformVersion>",
+ "<WindowsTargetPlatformVersion>10.0</WindowsTargetPlatformVersion>")
- tools.replace_in_file(os.path.join(self._source_subfolder, "windows", "vs2017", "liblzma_dll.vcxproj"),
- "<WindowsTargetPlatformVersion>10.0.15063.0</WindowsTargetPlatformVersion>",
- "<WindowsTargetPlatformVersion>10.0</WindowsTargetPlatformVersion>")
+ tools.replace_in_file(os.path.join(self._source_subfolder, "windows", "vs2017", "liblzma_dll.vcxproj"),
+ "<WindowsTargetPlatformVersion>10.0.15063.0</WindowsTargetPlatformVersion>",
+ "<WindowsTargetPlatformVersion>10.0</WindowsTargetPlatformVersion>")
def source(self):
tools.get(**self.conan_data["sources"][self.version])
@@ -58,7 +60,9 @@
def _build_msvc(self):
# windows\INSTALL-MSVC.txt
- if self.settings.compiler.version == 15:
+ if self.settings.compiler.version == 15 and tools.Version(self.version) == "5.2.4":
+ # Workaround is required only for 5.2.4 because since 5.2.5 WindowsTargetPlatformVersion is dropped from vcproj file
+ #
# emulate VS2019+ meaning of WindowsTargetPlatformVersion == "10.0"
# undocumented method, but officially recommended workaround by microsoft at at
# https://developercommunity.visualstudio.com/content/problem/140294/windowstargetplatformversion-makes-it-impossible-t.html
| {"golden_diff": "diff --git a/recipes/xz_utils/all/conanfile.py b/recipes/xz_utils/all/conanfile.py\n--- a/recipes/xz_utils/all/conanfile.py\n+++ b/recipes/xz_utils/all/conanfile.py\n@@ -9,6 +9,7 @@\n \" for POSIX-like systems, but also work on some not-so-POSIX systems. XZ Utils are the successor to LZMA Utils.\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://tukaani.org/xz\"\n+ topics = (\"conan\", \"lzma\", \"xz\", \"compression\")\n license = \"Public Domain, GNU LGPLv2.1, GNU GPLv2, or GNU GPLv3\"\n \n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n@@ -26,7 +27,7 @@\n def build_requirements(self):\n if self._use_winbash and \"CONAN_BASH_PATH\" not in os.environ and \\\n tools.os_info.detect_windows_subsystem() != \"msys2\":\n- self.build_requires(\"msys2/20190524\")\n+ self.build_requires(\"msys2/20200517\")\n \n def _effective_msbuild_type(self):\n # treat \"RelWithDebInfo\" and \"MinSizeRel\" as \"Release\"\n@@ -41,14 +42,15 @@\n del self.settings.compiler.libcxx\n \n def _apply_patches(self):\n- # Relax Windows SDK restriction\n- tools.replace_in_file(os.path.join(self._source_subfolder, \"windows\", \"vs2017\", \"liblzma.vcxproj\"),\n- \"<WindowsTargetPlatformVersion>10.0.15063.0</WindowsTargetPlatformVersion>\",\n- \"<WindowsTargetPlatformVersion>10.0</WindowsTargetPlatformVersion>\")\n+ if tools.Version(self.version) == \"5.2.4\":\n+ # Relax Windows SDK restriction\n+ tools.replace_in_file(os.path.join(self._source_subfolder, \"windows\", \"vs2017\", \"liblzma.vcxproj\"),\n+ \"<WindowsTargetPlatformVersion>10.0.15063.0</WindowsTargetPlatformVersion>\",\n+ \"<WindowsTargetPlatformVersion>10.0</WindowsTargetPlatformVersion>\")\n \n- tools.replace_in_file(os.path.join(self._source_subfolder, \"windows\", \"vs2017\", \"liblzma_dll.vcxproj\"),\n- \"<WindowsTargetPlatformVersion>10.0.15063.0</WindowsTargetPlatformVersion>\",\n- \"<WindowsTargetPlatformVersion>10.0</WindowsTargetPlatformVersion>\")\n+ tools.replace_in_file(os.path.join(self._source_subfolder, \"windows\", \"vs2017\", \"liblzma_dll.vcxproj\"),\n+ \"<WindowsTargetPlatformVersion>10.0.15063.0</WindowsTargetPlatformVersion>\",\n+ \"<WindowsTargetPlatformVersion>10.0</WindowsTargetPlatformVersion>\")\n \n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n@@ -58,7 +60,9 @@\n def _build_msvc(self):\n # windows\\INSTALL-MSVC.txt\n \n- if self.settings.compiler.version == 15:\n+ if self.settings.compiler.version == 15 and tools.Version(self.version) == \"5.2.4\":\n+ # Workaround is required only for 5.2.4 because since 5.2.5 WindowsTargetPlatformVersion is dropped from vcproj file\n+ #\n # emulate VS2019+ meaning of WindowsTargetPlatformVersion == \"10.0\"\n # undocumented method, but officially recommended workaround by microsoft at at\n # https://developercommunity.visualstudio.com/content/problem/140294/windowstargetplatformversion-makes-it-impossible-t.html\n", "issue": "[request] xz_utils/5.2.5\n### Package Details\r\n * Package Name/Version: **xz_utils/5.2.5**\r\n * Changelog: **https://git.tukaani.org/?p=xz.git;a=blob;f=NEWS;hb=HEAD**\r\n\r\n\r\nThe above mentioned version is newly released by the upstream project and not yet available as a recipe. 
Please add this version.\r\n\n", "before_files": [{"content": "import os\nfrom conans import ConanFile, tools, AutoToolsBuildEnvironment, MSBuild\nfrom conans.tools import Version\n\n\nclass XZUtils(ConanFile):\n name = \"xz_utils\"\n description = \"XZ Utils is free general-purpose data compression software with a high compression ratio. XZ Utils were written\" \\\n \" for POSIX-like systems, but also work on some not-so-POSIX systems. XZ Utils are the successor to LZMA Utils.\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://tukaani.org/xz\"\n license = \"Public Domain, GNU LGPLv2.1, GNU GPLv2, or GNU GPLv3\"\n\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\"shared\": [True, False], \"fPIC\": [True, False]}\n default_options = {\"shared\": False, \"fPIC\": True}\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n @property\n def _use_winbash(self):\n return tools.os_info.is_windows\n\n def build_requirements(self):\n if self._use_winbash and \"CONAN_BASH_PATH\" not in os.environ and \\\n tools.os_info.detect_windows_subsystem() != \"msys2\":\n self.build_requires(\"msys2/20190524\")\n\n def _effective_msbuild_type(self):\n # treat \"RelWithDebInfo\" and \"MinSizeRel\" as \"Release\"\n return \"Debug\" if self.settings.build_type == \"Debug\" else \"Release\"\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n del self.settings.compiler.cppstd\n del self.settings.compiler.libcxx\n\n def _apply_patches(self):\n # Relax Windows SDK restriction\n tools.replace_in_file(os.path.join(self._source_subfolder, \"windows\", \"vs2017\", \"liblzma.vcxproj\"),\n \"<WindowsTargetPlatformVersion>10.0.15063.0</WindowsTargetPlatformVersion>\",\n \"<WindowsTargetPlatformVersion>10.0</WindowsTargetPlatformVersion>\")\n\n tools.replace_in_file(os.path.join(self._source_subfolder, \"windows\", \"vs2017\", \"liblzma_dll.vcxproj\"),\n \"<WindowsTargetPlatformVersion>10.0.15063.0</WindowsTargetPlatformVersion>\",\n \"<WindowsTargetPlatformVersion>10.0</WindowsTargetPlatformVersion>\")\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n os.rename(\"xz-\" + self.version, self._source_subfolder)\n self._apply_patches()\n\n def _build_msvc(self):\n # windows\\INSTALL-MSVC.txt\n\n if self.settings.compiler.version == 15:\n # emulate VS2019+ meaning of WindowsTargetPlatformVersion == \"10.0\"\n # undocumented method, but officially recommended workaround by microsoft at at\n # https://developercommunity.visualstudio.com/content/problem/140294/windowstargetplatformversion-makes-it-impossible-t.html\n tools.replace_in_file(os.path.join(self._source_subfolder, \"windows\", \"vs2017\", \"liblzma.vcxproj\"),\n \"<WindowsTargetPlatformVersion>10.0</WindowsTargetPlatformVersion>\",\n \"<WindowsTargetPlatformVersion>$([Microsoft.Build.Utilities.ToolLocationHelper]::GetLatestSDKTargetPlatformVersion('Windows', '10.0'))</WindowsTargetPlatformVersion>\")\n\n tools.replace_in_file(os.path.join(self._source_subfolder, \"windows\", \"vs2017\", \"liblzma_dll.vcxproj\"),\n \"<WindowsTargetPlatformVersion>10.0</WindowsTargetPlatformVersion>\",\n \"<WindowsTargetPlatformVersion>$([Microsoft.Build.Utilities.ToolLocationHelper]::GetLatestSDKTargetPlatformVersion('Windows', '10.0'))</WindowsTargetPlatformVersion>\")\n\n msvc_version = \"vs2017\" if Version(self.settings.compiler.version) >= \"15\" else \"vs2013\"\n with 
tools.chdir(os.path.join(self._source_subfolder, \"windows\", msvc_version)):\n target = \"liblzma_dll\" if self.options.shared else \"liblzma\"\n msbuild = MSBuild(self)\n msbuild.build(\n \"xz_win.sln\",\n targets=[target],\n build_type=self._effective_msbuild_type(),\n platforms={\"x86\": \"Win32\", \"x86_64\": \"x64\"},\n use_env=False,\n upgrade_project=False)\n\n def _build_configure(self):\n with tools.chdir(self._source_subfolder):\n args = []\n env_build = AutoToolsBuildEnvironment(self, win_bash=self._use_winbash)\n args = [\"--disable-doc\"]\n if self.settings.os != \"Windows\" and self.options.fPIC:\n args.append(\"--with-pic\")\n if self.options.shared:\n args.extend([\"--disable-static\", \"--enable-shared\"])\n else:\n args.extend([\"--enable-static\", \"--disable-shared\"])\n if self.settings.build_type == \"Debug\":\n args.append(\"--enable-debug\")\n env_build.configure(args=args, build=False)\n env_build.make()\n env_build.install()\n\n def build(self):\n if self.settings.compiler == \"Visual Studio\":\n self._build_msvc()\n else:\n self._build_configure()\n\n def package(self):\n self.copy(pattern=\"COPYING\", dst=\"licenses\", src=self._source_subfolder)\n if self.settings.compiler == \"Visual Studio\":\n inc_dir = os.path.join(self._source_subfolder, \"src\", \"liblzma\", \"api\")\n self.copy(pattern=\"*.h\", dst=\"include\", src=inc_dir, keep_path=True)\n arch = {\"x86\": \"Win32\", \"x86_64\": \"x64\"}.get(str(self.settings.arch))\n target = \"liblzma_dll\" if self.options.shared else \"liblzma\"\n msvc_version = \"vs2017\" if Version(self.settings.compiler.version) >= \"15\" else \"vs2013\"\n bin_dir = os.path.join(self._source_subfolder, \"windows\", msvc_version,\n str(self._effective_msbuild_type()), arch, target)\n self.copy(pattern=\"*.lib\", dst=\"lib\", src=bin_dir, keep_path=False)\n if self.options.shared:\n self.copy(pattern=\"*.dll\", dst=\"bin\", src=bin_dir, keep_path=False)\n os.rename(os.path.join(self.package_folder, \"lib\", \"liblzma.lib\"),\n os.path.join(self.package_folder, \"lib\", \"lzma.lib\"))\n\n # Remove/rename forbidden files/folders in central repository\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n tools.rmdir(os.path.join(self.package_folder, \"share\"))\n try:\n os.remove(os.path.join(self.package_folder, \"lib\", \"liblzma.la\"))\n except:\n pass\n\n def package_info(self):\n if not self.options.shared:\n self.cpp_info.defines.append(\"LZMA_API_STATIC\")\n if self.settings.os == \"Linux\":\n self.cpp_info.system_libs.append(\"pthread\")\n self.cpp_info.libs = tools.collect_libs(self)\n self.cpp_info.names[\"pkg_config\"] = \"liblzma\"\n self.cpp_info.names[\"cmake_find_package\"] = \"LibLZMA\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"LibLZMA\"\n", "path": "recipes/xz_utils/all/conanfile.py"}]} | 2,611 | 864 |
gh_patches_debug_14845 | rasdani/github-patches | git_diff | HypothesisWorks__hypothesis-1044 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`escalation.belongs_to` cache is broken
The problem is that the cache is checked with the incoming string type, but [the string type changes](https://github.com/HypothesisWorks/hypothesis-python/blob/3.44.1/src/hypothesis/internal/escalation.py#L41) before the value is inserted:
A simple (but not elegant) fix:
```diff
--- hypothesis/internal/escalation.py
+++ hypothesis/internal/escalation.py
@@ -34,13 +34,14 @@ def belongs_to(package):
cache = {text_type: {}, binary_type: {}}
def accept(filepath):
+ ftype = type(filepath)
try:
- return cache[type(filepath)][filepath]
+ return cache[ftype][filepath]
except KeyError:
pass
filepath = encoded_filepath(filepath)
result = os.path.abspath(filepath).startswith(root)
- cache[type(filepath)][filepath] = result
+ cache[ftype][filepath] = result
return result
accept.__name__ = 'is_%s_file' % (package.__name__,)
return accept
```
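To make the consequence concrete, here is a minimal self-contained sketch of the same pattern (not the library code: `encoded_filepath` is replaced by a plain `decode`, and the package root is a made-up `/pkg` path):

```python
# Minimal illustration of the reported cache miss; only the caching pattern matters.
cache = {str: {}, bytes: {}}

def accept(filepath):
    try:
        return cache[type(filepath)][filepath]   # lookup keyed on the *incoming* type
    except KeyError:
        pass
    if isinstance(filepath, bytes):
        filepath = filepath.decode('utf-8')      # the type can change here
    result = filepath.startswith('/pkg')
    cache[type(filepath)][filepath] = result     # insert keyed on the *converted* type
    return result

accept(b'/pkg/mod.py')   # result gets stored under the str key
accept(b'/pkg/mod.py')   # bytes cache is still empty, so this recomputes every time
```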
</issue>
<code>
[start of src/hypothesis/internal/escalation.py]
1 # coding=utf-8
2 #
3 # This file is part of Hypothesis, which may be found at
4 # https://github.com/HypothesisWorks/hypothesis-python
5 #
6 # Most of this work is copyright (C) 2013-2017 David R. MacIver
7 # ([email protected]), but it contains contributions by others. See
8 # CONTRIBUTING.rst for a full list of people who may hold copyright, and
9 # consult the git log if you need to determine who owns an individual
10 # contribution.
11 #
12 # This Source Code Form is subject to the terms of the Mozilla Public License,
13 # v. 2.0. If a copy of the MPL was not distributed with this file, You can
14 # obtain one at http://mozilla.org/MPL/2.0/.
15 #
16 # END HEADER
17
18 from __future__ import division, print_function, absolute_import
19
20 import os
21 import sys
22
23 import coverage
24
25 import hypothesis
26 from hypothesis.errors import StopTest, DeadlineExceeded, \
27 HypothesisException, UnsatisfiedAssumption
28 from hypothesis.internal.compat import text_type, binary_type, \
29 encoded_filepath
30
31
32 def belongs_to(package):
33 root = os.path.dirname(package.__file__)
34 cache = {text_type: {}, binary_type: {}}
35
36 def accept(filepath):
37 try:
38 return cache[type(filepath)][filepath]
39 except KeyError:
40 pass
41 filepath = encoded_filepath(filepath)
42 result = os.path.abspath(filepath).startswith(root)
43 cache[type(filepath)][filepath] = result
44 return result
45 accept.__name__ = 'is_%s_file' % (package.__name__,)
46 return accept
47
48
49 PREVENT_ESCALATION = os.getenv('HYPOTHESIS_DO_NOT_ESCALATE') == 'true'
50
51 FILE_CACHE = {}
52
53
54 is_hypothesis_file = belongs_to(hypothesis)
55 is_coverage_file = belongs_to(coverage)
56
57 HYPOTHESIS_CONTROL_EXCEPTIONS = (
58 DeadlineExceeded, StopTest, UnsatisfiedAssumption
59 )
60
61
62 def mark_for_escalation(e):
63 if not isinstance(e, HYPOTHESIS_CONTROL_EXCEPTIONS):
64 e.hypothesis_internal_always_escalate = True
65
66
67 def escalate_hypothesis_internal_error():
68 if PREVENT_ESCALATION:
69 return
70 error_type, e, tb = sys.exc_info()
71 if getattr(e, 'hypothesis_internal_always_escalate', False):
72 raise
73 import traceback
74 filepath = traceback.extract_tb(tb)[-1][0]
75 if is_hypothesis_file(filepath) and not isinstance(
76 e, (HypothesisException,) + HYPOTHESIS_CONTROL_EXCEPTIONS,
77 ):
78 raise
79 # This is so that if we do something wrong and trigger an internal Coverage
80 # error we don't try to catch it. It should be impossible to trigger, but
81 # you never know.
82 if is_coverage_file(filepath): # pragma: no cover
83 raise
84
[end of src/hypothesis/internal/escalation.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/hypothesis/internal/escalation.py b/src/hypothesis/internal/escalation.py
--- a/src/hypothesis/internal/escalation.py
+++ b/src/hypothesis/internal/escalation.py
@@ -34,13 +34,15 @@
cache = {text_type: {}, binary_type: {}}
def accept(filepath):
+ ftype = type(filepath)
try:
- return cache[type(filepath)][filepath]
+ return cache[ftype][filepath]
except KeyError:
pass
- filepath = encoded_filepath(filepath)
- result = os.path.abspath(filepath).startswith(root)
- cache[type(filepath)][filepath] = result
+ new_filepath = encoded_filepath(filepath)
+ result = os.path.abspath(new_filepath).startswith(root)
+ cache[ftype][filepath] = result
+ cache[type(new_filepath)][new_filepath] = result
return result
accept.__name__ = 'is_%s_file' % (package.__name__,)
return accept
| {"golden_diff": "diff --git a/src/hypothesis/internal/escalation.py b/src/hypothesis/internal/escalation.py\n--- a/src/hypothesis/internal/escalation.py\n+++ b/src/hypothesis/internal/escalation.py\n@@ -34,13 +34,15 @@\n cache = {text_type: {}, binary_type: {}}\n \n def accept(filepath):\n+ ftype = type(filepath)\n try:\n- return cache[type(filepath)][filepath]\n+ return cache[ftype][filepath]\n except KeyError:\n pass\n- filepath = encoded_filepath(filepath)\n- result = os.path.abspath(filepath).startswith(root)\n- cache[type(filepath)][filepath] = result\n+ new_filepath = encoded_filepath(filepath)\n+ result = os.path.abspath(new_filepath).startswith(root)\n+ cache[ftype][filepath] = result\n+ cache[type(new_filepath)][new_filepath] = result\n return result\n accept.__name__ = 'is_%s_file' % (package.__name__,)\n return accept\n", "issue": "`escalation.belongs_to` cache is broken\nThe problem is that the cache is checked with the incoming string type, but [the string type changes](https://github.com/HypothesisWorks/hypothesis-python/blob/3.44.1/src/hypothesis/internal/escalation.py#L41) before the value is inserted:\r\n\r\nA simple (but not elegant) fix:\r\n\r\n```diff\r\n--- hypothesis/internal/escalation.py\r\n+++ hypothesis/internal/escalation.py\r\n@@ -34,13 +34,14 @@ def belongs_to(package):\r\n cache = {text_type: {}, binary_type: {}}\r\n \r\n def accept(filepath):\r\n+ ftype = type(filepath)\r\n try:\r\n- return cache[type(filepath)][filepath]\r\n+ return cache[ftype][filepath]\r\n except KeyError:\r\n pass\r\n filepath = encoded_filepath(filepath)\r\n result = os.path.abspath(filepath).startswith(root)\r\n- cache[type(filepath)][filepath] = result\r\n+ cache[ftype][filepath] = result\r\n return result\r\n accept.__name__ = 'is_%s_file' % (package.__name__,)\r\n return accept\r\n```\n", "before_files": [{"content": "# coding=utf-8\n#\n# This file is part of Hypothesis, which may be found at\n# https://github.com/HypothesisWorks/hypothesis-python\n#\n# Most of this work is copyright (C) 2013-2017 David R. MacIver\n# ([email protected]), but it contains contributions by others. See\n# CONTRIBUTING.rst for a full list of people who may hold copyright, and\n# consult the git log if you need to determine who owns an individual\n# contribution.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public License,\n# v. 2.0. 
If a copy of the MPL was not distributed with this file, You can\n# obtain one at http://mozilla.org/MPL/2.0/.\n#\n# END HEADER\n\nfrom __future__ import division, print_function, absolute_import\n\nimport os\nimport sys\n\nimport coverage\n\nimport hypothesis\nfrom hypothesis.errors import StopTest, DeadlineExceeded, \\\n HypothesisException, UnsatisfiedAssumption\nfrom hypothesis.internal.compat import text_type, binary_type, \\\n encoded_filepath\n\n\ndef belongs_to(package):\n root = os.path.dirname(package.__file__)\n cache = {text_type: {}, binary_type: {}}\n\n def accept(filepath):\n try:\n return cache[type(filepath)][filepath]\n except KeyError:\n pass\n filepath = encoded_filepath(filepath)\n result = os.path.abspath(filepath).startswith(root)\n cache[type(filepath)][filepath] = result\n return result\n accept.__name__ = 'is_%s_file' % (package.__name__,)\n return accept\n\n\nPREVENT_ESCALATION = os.getenv('HYPOTHESIS_DO_NOT_ESCALATE') == 'true'\n\nFILE_CACHE = {}\n\n\nis_hypothesis_file = belongs_to(hypothesis)\nis_coverage_file = belongs_to(coverage)\n\nHYPOTHESIS_CONTROL_EXCEPTIONS = (\n DeadlineExceeded, StopTest, UnsatisfiedAssumption\n)\n\n\ndef mark_for_escalation(e):\n if not isinstance(e, HYPOTHESIS_CONTROL_EXCEPTIONS):\n e.hypothesis_internal_always_escalate = True\n\n\ndef escalate_hypothesis_internal_error():\n if PREVENT_ESCALATION:\n return\n error_type, e, tb = sys.exc_info()\n if getattr(e, 'hypothesis_internal_always_escalate', False):\n raise\n import traceback\n filepath = traceback.extract_tb(tb)[-1][0]\n if is_hypothesis_file(filepath) and not isinstance(\n e, (HypothesisException,) + HYPOTHESIS_CONTROL_EXCEPTIONS,\n ):\n raise\n # This is so that if we do something wrong and trigger an internal Coverage\n # error we don't try to catch it. It should be impossible to trigger, but\n # you never know.\n if is_coverage_file(filepath): # pragma: no cover\n raise\n", "path": "src/hypothesis/internal/escalation.py"}]} | 1,583 | 221 |
gh_patches_debug_40850 | rasdani/github-patches | git_diff | python-telegram-bot__python-telegram-bot-340 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
since 4.3 can't connect behind proxy
<!--
Thanks for reporting issues of python-telegram-bot!
To make it easier for us to help you please enter detailed information below.
-->
### Steps to reproduce
1. pip3 install python-telegram-bot --upgrade
2. Don't send anything
3. pip3 install python-telegram-bot-4.2.1
4. Works again!
### Expected behaviour
Tell us what should happen
### Actual behaviour
Tell us what happens instead
### Configuration
**Operating System:**
**Version of Python:**
`$ python -V` 3.5
**Version of python-telegram-bot:**
4.3.2, 4.2.1
`$ python -c 'import telegram; print(telegram.__version__)'`
### Logs
Insert logs here (if necessary)
</issue>
<code>
[start of telegram/utils/request.py]
1 #!/usr/bin/env python
2 #
3 # A library that provides a Python interface to the Telegram Bot API
4 # Copyright (C) 2015-2016
5 # Leandro Toledo de Souza <[email protected]>
6 #
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Lesser Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Lesser Public License for more details.
16 #
17 # You should have received a copy of the GNU Lesser Public License
18 # along with this program. If not, see [http://www.gnu.org/licenses/].
19 """This module contains methods to make POST and GET requests"""
20
21 import json
22 import socket
23 import logging
24
25 import certifi
26 import urllib3
27 from urllib3.connection import HTTPConnection
28
29 from telegram import (InputFile, TelegramError)
30 from telegram.error import Unauthorized, NetworkError, TimedOut, BadRequest
31
32 _CON_POOL = None
33 """:type: urllib3.PoolManager"""
34 CON_POOL_SIZE = 1
35
36 logging.getLogger('urllib3').setLevel(logging.WARNING)
37
38
39 def _get_con_pool():
40 global _CON_POOL
41
42 if _CON_POOL is not None:
43 return _CON_POOL
44
45 _CON_POOL = urllib3.PoolManager(maxsize=CON_POOL_SIZE,
46 cert_reqs='CERT_REQUIRED',
47 ca_certs=certifi.where(),
48 socket_options=HTTPConnection.default_socket_options + [
49 (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
50 ])
51 return _CON_POOL
52
53
54 def is_con_pool_initialized():
55 return _CON_POOL is not None
56
57
58 def stop_con_pool():
59 global _CON_POOL
60 if _CON_POOL is not None:
61 _CON_POOL.clear()
62 _CON_POOL = None
63
64
65 def _parse(json_data):
66 """Try and parse the JSON returned from Telegram.
67
68 Returns:
69 dict: A JSON parsed as Python dict with results - on error this dict will be empty.
70
71 """
72 decoded_s = json_data.decode('utf-8')
73 try:
74 data = json.loads(decoded_s)
75 except ValueError:
76 raise TelegramError('Invalid server response')
77
78 if not data.get('ok') and data.get('description'):
79 return data['description']
80
81 return data['result']
82
83
84 def _request_wrapper(*args, **kwargs):
85 """Wraps urllib3 request for handling known exceptions.
86
87 Args:
88 args: unnamed arguments, passed to urllib3 request.
89 kwargs: keyword arguments, passed tp urllib3 request.
90
91 Returns:
92 str: A non-parsed JSON text.
93
94 Raises:
95 TelegramError
96
97 """
98
99 try:
100 resp = _get_con_pool().request(*args, **kwargs)
101 except urllib3.exceptions.TimeoutError as error:
102 raise TimedOut()
103 except urllib3.exceptions.HTTPError as error:
104 # HTTPError must come last as its the base urllib3 exception class
105 # TODO: do something smart here; for now just raise NetworkError
106 raise NetworkError('urllib3 HTTPError {0}'.format(error))
107
108 if 200 <= resp.status <= 299:
109 # 200-299 range are HTTP success statuses
110 return resp.data
111
112 try:
113 message = _parse(resp.data)
114 except ValueError:
115 raise NetworkError('Unknown HTTPError {0}'.format(resp.status))
116
117 if resp.status in (401, 403):
118 raise Unauthorized()
119 elif resp.status == 400:
120 raise BadRequest(repr(message))
121 elif resp.status == 502:
122 raise NetworkError('Bad Gateway')
123 else:
124 raise NetworkError('{0} ({1})'.format(message, resp.status))
125
126
127 def get(url):
128 """Request an URL.
129 Args:
130 url:
131 The web location we want to retrieve.
132
133 Returns:
134 A JSON object.
135
136 """
137 result = _request_wrapper('GET', url)
138
139 return _parse(result)
140
141
142 def post(url, data, timeout=None):
143 """Request an URL.
144 Args:
145 url:
146 The web location we want to retrieve.
147 data:
148 A dict of (str, unicode) key/value pairs.
149 timeout:
150 float. If this value is specified, use it as the definitive timeout (in
151 seconds) for urlopen() operations. [Optional]
152
153 Notes:
154 If neither `timeout` nor `data['timeout']` is specified. The underlying
155 defaults are used.
156
157 Returns:
158 A JSON object.
159
160 """
161 urlopen_kwargs = {}
162
163 if timeout is not None:
164 urlopen_kwargs['timeout'] = timeout
165
166 if InputFile.is_inputfile(data):
167 data = InputFile(data)
168 result = _request_wrapper('POST', url, body=data.to_form(), headers=data.headers)
169 else:
170 data = json.dumps(data)
171 result = _request_wrapper('POST',
172 url,
173 body=data.encode(),
174 headers={'Content-Type': 'application/json'},
175 **urlopen_kwargs)
176
177 return _parse(result)
178
179
180 def download(url, filename):
181 """Download a file by its URL.
182 Args:
183 url:
184 The web location we want to retrieve.
185
186 filename:
187 The filename within the path to download the file.
188
189 """
190 buf = _request_wrapper('GET', url)
191 with open(filename, 'wb') as fobj:
192 fobj.write(buf)
193
[end of telegram/utils/request.py]
[start of telegram/__init__.py]
1 #!/usr/bin/env python
2 #
3 # A library that provides a Python interface to the Telegram Bot API
4 # Copyright (C) 2015-2016
5 # Leandro Toledo de Souza <[email protected]>
6 #
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Lesser Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Lesser Public License for more details.
16 #
17 # You should have received a copy of the GNU Lesser Public License
18 # along with this program. If not, see [http://www.gnu.org/licenses/].
19 """A library that provides a Python interface to the Telegram Bot API"""
20
21 from sys import version_info
22
23 from .base import TelegramObject
24 from .user import User
25 from .chat import Chat
26 from .chatmember import ChatMember
27 from .photosize import PhotoSize
28 from .audio import Audio
29 from .voice import Voice
30 from .document import Document
31 from .sticker import Sticker
32 from .video import Video
33 from .contact import Contact
34 from .location import Location
35 from .venue import Venue
36 from .chataction import ChatAction
37 from .userprofilephotos import UserProfilePhotos
38 from .keyboardbutton import KeyboardButton
39 from .replymarkup import ReplyMarkup
40 from .replykeyboardmarkup import ReplyKeyboardMarkup
41 from .replykeyboardhide import ReplyKeyboardHide
42 from .forcereply import ForceReply
43 from .error import TelegramError
44 from .inputfile import InputFile
45 from .file import File
46 from .nullhandler import NullHandler
47 from .emoji import Emoji
48 from .parsemode import ParseMode
49 from .messageentity import MessageEntity
50 from .message import Message
51 from .inputmessagecontent import InputMessageContent
52 from .callbackquery import CallbackQuery
53 from .choseninlineresult import ChosenInlineResult
54 from .inlinekeyboardbutton import InlineKeyboardButton
55 from .inlinekeyboardmarkup import InlineKeyboardMarkup
56 from .inlinequery import InlineQuery
57 from .inlinequeryresult import InlineQueryResult
58 from .inlinequeryresultarticle import InlineQueryResultArticle
59 from .inlinequeryresultaudio import InlineQueryResultAudio
60 from .inlinequeryresultcachedaudio import InlineQueryResultCachedAudio
61 from .inlinequeryresultcacheddocument import InlineQueryResultCachedDocument
62 from .inlinequeryresultcachedgif import InlineQueryResultCachedGif
63 from .inlinequeryresultcachedmpeg4gif import InlineQueryResultCachedMpeg4Gif
64 from .inlinequeryresultcachedphoto import InlineQueryResultCachedPhoto
65 from .inlinequeryresultcachedsticker import InlineQueryResultCachedSticker
66 from .inlinequeryresultcachedvideo import InlineQueryResultCachedVideo
67 from .inlinequeryresultcachedvoice import InlineQueryResultCachedVoice
68 from .inlinequeryresultcontact import InlineQueryResultContact
69 from .inlinequeryresultdocument import InlineQueryResultDocument
70 from .inlinequeryresultgif import InlineQueryResultGif
71 from .inlinequeryresultlocation import InlineQueryResultLocation
72 from .inlinequeryresultmpeg4gif import InlineQueryResultMpeg4Gif
73 from .inlinequeryresultphoto import InlineQueryResultPhoto
74 from .inlinequeryresultvenue import InlineQueryResultVenue
75 from .inlinequeryresultvideo import InlineQueryResultVideo
76 from .inlinequeryresultvoice import InlineQueryResultVoice
77 from .inputtextmessagecontent import InputTextMessageContent
78 from .inputlocationmessagecontent import InputLocationMessageContent
79 from .inputvenuemessagecontent import InputVenueMessageContent
80 from .inputcontactmessagecontent import InputContactMessageContent
81 from .update import Update
82 from .bot import Bot
83
84 __author__ = '[email protected]'
85 __version__ = '4.3.2'
86 __all__ = ['Audio', 'Bot', 'Chat', 'ChatMember', 'ChatAction', 'ChosenInlineResult',
87 'CallbackQuery', 'Contact', 'Document', 'Emoji', 'File', 'ForceReply',
88 'InlineKeyboardButton', 'InlineKeyboardMarkup', 'InlineQuery', 'InlineQueryResult',
89 'InlineQueryResult', 'InlineQueryResultArticle', 'InlineQueryResultAudio',
90 'InlineQueryResultCachedAudio', 'InlineQueryResultCachedDocument',
91 'InlineQueryResultCachedGif', 'InlineQueryResultCachedMpeg4Gif',
92 'InlineQueryResultCachedPhoto', 'InlineQueryResultCachedSticker',
93 'InlineQueryResultCachedVideo', 'InlineQueryResultCachedVoice',
94 'InlineQueryResultContact', 'InlineQueryResultDocument', 'InlineQueryResultGif',
95 'InlineQueryResultLocation', 'InlineQueryResultMpeg4Gif', 'InlineQueryResultPhoto',
96 'InlineQueryResultVenue', 'InlineQueryResultVideo', 'InlineQueryResultVoice',
97 'InputContactMessageContent', 'InputFile', 'InputLocationMessageContent',
98 'InputMessageContent', 'InputTextMessageContent', 'InputVenueMessageContent',
99 'KeyboardButton', 'Location', 'Message', 'MessageEntity', 'NullHandler', 'ParseMode',
100 'PhotoSize', 'ReplyKeyboardHide', 'ReplyKeyboardMarkup', 'ReplyMarkup', 'Sticker',
101 'TelegramError', 'TelegramObject', 'Update', 'User', 'UserProfilePhotos', 'Venue',
102 'Video', 'Voice']
103
104 if version_info < (2, 7):
105 from warnings import warn
106 warn("python-telegram-bot will stop supporting Python 2.6 in a future release. "
107 "Please upgrade your Python version to at least Python 2.7!")
108
[end of telegram/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/telegram/__init__.py b/telegram/__init__.py
--- a/telegram/__init__.py
+++ b/telegram/__init__.py
@@ -82,7 +82,7 @@
from .bot import Bot
__author__ = '[email protected]'
-__version__ = '4.3.2'
+__version__ = '4.3.3'
__all__ = ['Audio', 'Bot', 'Chat', 'ChatMember', 'ChatAction', 'ChosenInlineResult',
'CallbackQuery', 'Contact', 'Document', 'Emoji', 'File', 'ForceReply',
'InlineKeyboardButton', 'InlineKeyboardMarkup', 'InlineQuery', 'InlineQueryResult',
diff --git a/telegram/utils/request.py b/telegram/utils/request.py
--- a/telegram/utils/request.py
+++ b/telegram/utils/request.py
@@ -19,6 +19,7 @@
"""This module contains methods to make POST and GET requests"""
import json
+import os
import socket
import logging
@@ -31,26 +32,41 @@
_CON_POOL = None
""":type: urllib3.PoolManager"""
+_CON_POOL_PROXY = None
+_CON_POOL_PROXY_KWARGS = {}
CON_POOL_SIZE = 1
logging.getLogger('urllib3').setLevel(logging.WARNING)
def _get_con_pool():
- global _CON_POOL
-
if _CON_POOL is not None:
return _CON_POOL
- _CON_POOL = urllib3.PoolManager(maxsize=CON_POOL_SIZE,
- cert_reqs='CERT_REQUIRED',
- ca_certs=certifi.where(),
- socket_options=HTTPConnection.default_socket_options + [
- (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
- ])
+ _init_con_pool()
return _CON_POOL
+def _init_con_pool():
+ global _CON_POOL
+ kwargs = dict(maxsize=CON_POOL_SIZE,
+ cert_reqs='CERT_REQUIRED',
+ ca_certs=certifi.where(),
+ socket_options=HTTPConnection.default_socket_options + [
+ (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
+ ])
+ proxy_url = _get_con_pool_proxy()
+ if not proxy_url:
+ mgr = urllib3.PoolManager
+ else:
+ kwargs['proxy_url'] = proxy_url
+ if _CON_POOL_PROXY_KWARGS:
+ kwargs.update(_CON_POOL_PROXY_KWARGS)
+ mgr = urllib3.ProxyManager
+
+ _CON_POOL = mgr(**kwargs)
+
+
def is_con_pool_initialized():
return _CON_POOL is not None
@@ -62,6 +78,47 @@
_CON_POOL = None
+def set_con_pool_proxy(url, **urllib3_kwargs):
+ """Setup connection pool behind a proxy
+
+ Args:
+ url (str): The URL to the proxy server. For example: `http://127.0.0.1:3128`
+ urllib3_kwargs (dict): Arbitrary arguments passed as-is to `urllib3.ProxyManager`
+
+ """
+ global _CON_POOL_PROXY
+ global _CON_POOL_PROXY_KWARGS
+
+ if is_con_pool_initialized():
+ raise TelegramError('conpool already initialized')
+
+ _CON_POOL_PROXY = url
+ _CON_POOL_PROXY_KWARGS = urllib3_kwargs
+
+
+def _get_con_pool_proxy():
+ """Return the user configured proxy according to the following order:
+
+ * proxy configured using `set_con_pool_proxy()`.
+ * proxy set in `HTTPS_PROXY` env. var.
+ * proxy set in `https_proxy` env. var.
+ * None (if no proxy is configured)
+
+ Returns:
+ str | None
+
+ """
+ if _CON_POOL_PROXY:
+ return _CON_POOL_PROXY
+ from_env = os.environ.get('HTTPS_PROXY')
+ if from_env:
+ return from_env
+ from_env = os.environ.get('https_proxy')
+ if from_env:
+ return from_env
+ return None
+
+
def _parse(json_data):
"""Try and parse the JSON returned from Telegram.
| {"golden_diff": "diff --git a/telegram/__init__.py b/telegram/__init__.py\n--- a/telegram/__init__.py\n+++ b/telegram/__init__.py\n@@ -82,7 +82,7 @@\n from .bot import Bot\n \n __author__ = '[email protected]'\n-__version__ = '4.3.2'\n+__version__ = '4.3.3'\n __all__ = ['Audio', 'Bot', 'Chat', 'ChatMember', 'ChatAction', 'ChosenInlineResult',\n 'CallbackQuery', 'Contact', 'Document', 'Emoji', 'File', 'ForceReply',\n 'InlineKeyboardButton', 'InlineKeyboardMarkup', 'InlineQuery', 'InlineQueryResult',\ndiff --git a/telegram/utils/request.py b/telegram/utils/request.py\n--- a/telegram/utils/request.py\n+++ b/telegram/utils/request.py\n@@ -19,6 +19,7 @@\n \"\"\"This module contains methods to make POST and GET requests\"\"\"\n \n import json\n+import os\n import socket\n import logging\n \n@@ -31,26 +32,41 @@\n \n _CON_POOL = None\n \"\"\":type: urllib3.PoolManager\"\"\"\n+_CON_POOL_PROXY = None\n+_CON_POOL_PROXY_KWARGS = {}\n CON_POOL_SIZE = 1\n \n logging.getLogger('urllib3').setLevel(logging.WARNING)\n \n \n def _get_con_pool():\n- global _CON_POOL\n-\n if _CON_POOL is not None:\n return _CON_POOL\n \n- _CON_POOL = urllib3.PoolManager(maxsize=CON_POOL_SIZE,\n- cert_reqs='CERT_REQUIRED',\n- ca_certs=certifi.where(),\n- socket_options=HTTPConnection.default_socket_options + [\n- (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),\n- ])\n+ _init_con_pool()\n return _CON_POOL\n \n \n+def _init_con_pool():\n+ global _CON_POOL\n+ kwargs = dict(maxsize=CON_POOL_SIZE,\n+ cert_reqs='CERT_REQUIRED',\n+ ca_certs=certifi.where(),\n+ socket_options=HTTPConnection.default_socket_options + [\n+ (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),\n+ ])\n+ proxy_url = _get_con_pool_proxy()\n+ if not proxy_url:\n+ mgr = urllib3.PoolManager\n+ else:\n+ kwargs['proxy_url'] = proxy_url\n+ if _CON_POOL_PROXY_KWARGS:\n+ kwargs.update(_CON_POOL_PROXY_KWARGS)\n+ mgr = urllib3.ProxyManager\n+\n+ _CON_POOL = mgr(**kwargs)\n+\n+\n def is_con_pool_initialized():\n return _CON_POOL is not None\n \n@@ -62,6 +78,47 @@\n _CON_POOL = None\n \n \n+def set_con_pool_proxy(url, **urllib3_kwargs):\n+ \"\"\"Setup connection pool behind a proxy\n+\n+ Args:\n+ url (str): The URL to the proxy server. For example: `http://127.0.0.1:3128`\n+ urllib3_kwargs (dict): Arbitrary arguments passed as-is to `urllib3.ProxyManager`\n+\n+ \"\"\"\n+ global _CON_POOL_PROXY\n+ global _CON_POOL_PROXY_KWARGS\n+\n+ if is_con_pool_initialized():\n+ raise TelegramError('conpool already initialized')\n+\n+ _CON_POOL_PROXY = url\n+ _CON_POOL_PROXY_KWARGS = urllib3_kwargs\n+\n+\n+def _get_con_pool_proxy():\n+ \"\"\"Return the user configured proxy according to the following order:\n+\n+ * proxy configured using `set_con_pool_proxy()`.\n+ * proxy set in `HTTPS_PROXY` env. var.\n+ * proxy set in `https_proxy` env. var.\n+ * None (if no proxy is configured)\n+\n+ Returns:\n+ str | None\n+\n+ \"\"\"\n+ if _CON_POOL_PROXY:\n+ return _CON_POOL_PROXY\n+ from_env = os.environ.get('HTTPS_PROXY')\n+ if from_env:\n+ return from_env\n+ from_env = os.environ.get('https_proxy')\n+ if from_env:\n+ return from_env\n+ return None\n+\n+\n def _parse(json_data):\n \"\"\"Try and parse the JSON returned from Telegram.\n", "issue": "since 4.3 can\u00b4t connect behind proxy\n<!--\nThanks for reporting issues of python-telegram-bot!\nTo make it easier for us to help you please enter detailed information below.\n-->\n### Steps to reproduce\n1. pip3 install python-telegram-bot --upgrade\n2. Don\u00b4t send anything\n3. pip3 install python-telegram-bot-4.2.1\n4. 
Works again!\n### Expected behaviour\n\nTell us what should happen\n### Actual behaviour\n\nTell us what happens instead\n### Configuration\n\n**Operating System:**\n\n**Version of Python:**\n\n`$ python -V` 3.5\n\n**Version of python-telegram-bot:**\n4.3.2, 4.2.1\n`$ python -c 'import telegram; print(telegram.__version__)'`\n### Logs\n\nInsert logs here (if necessary)\n\n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2016\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains methods to make POST and GET requests\"\"\"\n\nimport json\nimport socket\nimport logging\n\nimport certifi\nimport urllib3\nfrom urllib3.connection import HTTPConnection\n\nfrom telegram import (InputFile, TelegramError)\nfrom telegram.error import Unauthorized, NetworkError, TimedOut, BadRequest\n\n_CON_POOL = None\n\"\"\":type: urllib3.PoolManager\"\"\"\nCON_POOL_SIZE = 1\n\nlogging.getLogger('urllib3').setLevel(logging.WARNING)\n\n\ndef _get_con_pool():\n global _CON_POOL\n\n if _CON_POOL is not None:\n return _CON_POOL\n\n _CON_POOL = urllib3.PoolManager(maxsize=CON_POOL_SIZE,\n cert_reqs='CERT_REQUIRED',\n ca_certs=certifi.where(),\n socket_options=HTTPConnection.default_socket_options + [\n (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),\n ])\n return _CON_POOL\n\n\ndef is_con_pool_initialized():\n return _CON_POOL is not None\n\n\ndef stop_con_pool():\n global _CON_POOL\n if _CON_POOL is not None:\n _CON_POOL.clear()\n _CON_POOL = None\n\n\ndef _parse(json_data):\n \"\"\"Try and parse the JSON returned from Telegram.\n\n Returns:\n dict: A JSON parsed as Python dict with results - on error this dict will be empty.\n\n \"\"\"\n decoded_s = json_data.decode('utf-8')\n try:\n data = json.loads(decoded_s)\n except ValueError:\n raise TelegramError('Invalid server response')\n\n if not data.get('ok') and data.get('description'):\n return data['description']\n\n return data['result']\n\n\ndef _request_wrapper(*args, **kwargs):\n \"\"\"Wraps urllib3 request for handling known exceptions.\n\n Args:\n args: unnamed arguments, passed to urllib3 request.\n kwargs: keyword arguments, passed tp urllib3 request.\n\n Returns:\n str: A non-parsed JSON text.\n\n Raises:\n TelegramError\n\n \"\"\"\n\n try:\n resp = _get_con_pool().request(*args, **kwargs)\n except urllib3.exceptions.TimeoutError as error:\n raise TimedOut()\n except urllib3.exceptions.HTTPError as error:\n # HTTPError must come last as its the base urllib3 exception class\n # TODO: do something smart here; for now just raise NetworkError\n raise NetworkError('urllib3 HTTPError {0}'.format(error))\n\n if 200 <= resp.status <= 299:\n # 200-299 range are HTTP success statuses\n return resp.data\n\n try:\n message = _parse(resp.data)\n except ValueError:\n raise NetworkError('Unknown HTTPError 
{0}'.format(resp.status))\n\n if resp.status in (401, 403):\n raise Unauthorized()\n elif resp.status == 400:\n raise BadRequest(repr(message))\n elif resp.status == 502:\n raise NetworkError('Bad Gateway')\n else:\n raise NetworkError('{0} ({1})'.format(message, resp.status))\n\n\ndef get(url):\n \"\"\"Request an URL.\n Args:\n url:\n The web location we want to retrieve.\n\n Returns:\n A JSON object.\n\n \"\"\"\n result = _request_wrapper('GET', url)\n\n return _parse(result)\n\n\ndef post(url, data, timeout=None):\n \"\"\"Request an URL.\n Args:\n url:\n The web location we want to retrieve.\n data:\n A dict of (str, unicode) key/value pairs.\n timeout:\n float. If this value is specified, use it as the definitive timeout (in\n seconds) for urlopen() operations. [Optional]\n\n Notes:\n If neither `timeout` nor `data['timeout']` is specified. The underlying\n defaults are used.\n\n Returns:\n A JSON object.\n\n \"\"\"\n urlopen_kwargs = {}\n\n if timeout is not None:\n urlopen_kwargs['timeout'] = timeout\n\n if InputFile.is_inputfile(data):\n data = InputFile(data)\n result = _request_wrapper('POST', url, body=data.to_form(), headers=data.headers)\n else:\n data = json.dumps(data)\n result = _request_wrapper('POST',\n url,\n body=data.encode(),\n headers={'Content-Type': 'application/json'},\n **urlopen_kwargs)\n\n return _parse(result)\n\n\ndef download(url, filename):\n \"\"\"Download a file by its URL.\n Args:\n url:\n The web location we want to retrieve.\n\n filename:\n The filename within the path to download the file.\n\n \"\"\"\n buf = _request_wrapper('GET', url)\n with open(filename, 'wb') as fobj:\n fobj.write(buf)\n", "path": "telegram/utils/request.py"}, {"content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2016\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. 
If not, see [http://www.gnu.org/licenses/].\n\"\"\"A library that provides a Python interface to the Telegram Bot API\"\"\"\n\nfrom sys import version_info\n\nfrom .base import TelegramObject\nfrom .user import User\nfrom .chat import Chat\nfrom .chatmember import ChatMember\nfrom .photosize import PhotoSize\nfrom .audio import Audio\nfrom .voice import Voice\nfrom .document import Document\nfrom .sticker import Sticker\nfrom .video import Video\nfrom .contact import Contact\nfrom .location import Location\nfrom .venue import Venue\nfrom .chataction import ChatAction\nfrom .userprofilephotos import UserProfilePhotos\nfrom .keyboardbutton import KeyboardButton\nfrom .replymarkup import ReplyMarkup\nfrom .replykeyboardmarkup import ReplyKeyboardMarkup\nfrom .replykeyboardhide import ReplyKeyboardHide\nfrom .forcereply import ForceReply\nfrom .error import TelegramError\nfrom .inputfile import InputFile\nfrom .file import File\nfrom .nullhandler import NullHandler\nfrom .emoji import Emoji\nfrom .parsemode import ParseMode\nfrom .messageentity import MessageEntity\nfrom .message import Message\nfrom .inputmessagecontent import InputMessageContent\nfrom .callbackquery import CallbackQuery\nfrom .choseninlineresult import ChosenInlineResult\nfrom .inlinekeyboardbutton import InlineKeyboardButton\nfrom .inlinekeyboardmarkup import InlineKeyboardMarkup\nfrom .inlinequery import InlineQuery\nfrom .inlinequeryresult import InlineQueryResult\nfrom .inlinequeryresultarticle import InlineQueryResultArticle\nfrom .inlinequeryresultaudio import InlineQueryResultAudio\nfrom .inlinequeryresultcachedaudio import InlineQueryResultCachedAudio\nfrom .inlinequeryresultcacheddocument import InlineQueryResultCachedDocument\nfrom .inlinequeryresultcachedgif import InlineQueryResultCachedGif\nfrom .inlinequeryresultcachedmpeg4gif import InlineQueryResultCachedMpeg4Gif\nfrom .inlinequeryresultcachedphoto import InlineQueryResultCachedPhoto\nfrom .inlinequeryresultcachedsticker import InlineQueryResultCachedSticker\nfrom .inlinequeryresultcachedvideo import InlineQueryResultCachedVideo\nfrom .inlinequeryresultcachedvoice import InlineQueryResultCachedVoice\nfrom .inlinequeryresultcontact import InlineQueryResultContact\nfrom .inlinequeryresultdocument import InlineQueryResultDocument\nfrom .inlinequeryresultgif import InlineQueryResultGif\nfrom .inlinequeryresultlocation import InlineQueryResultLocation\nfrom .inlinequeryresultmpeg4gif import InlineQueryResultMpeg4Gif\nfrom .inlinequeryresultphoto import InlineQueryResultPhoto\nfrom .inlinequeryresultvenue import InlineQueryResultVenue\nfrom .inlinequeryresultvideo import InlineQueryResultVideo\nfrom .inlinequeryresultvoice import InlineQueryResultVoice\nfrom .inputtextmessagecontent import InputTextMessageContent\nfrom .inputlocationmessagecontent import InputLocationMessageContent\nfrom .inputvenuemessagecontent import InputVenueMessageContent\nfrom .inputcontactmessagecontent import InputContactMessageContent\nfrom .update import Update\nfrom .bot import Bot\n\n__author__ = '[email protected]'\n__version__ = '4.3.2'\n__all__ = ['Audio', 'Bot', 'Chat', 'ChatMember', 'ChatAction', 'ChosenInlineResult',\n 'CallbackQuery', 'Contact', 'Document', 'Emoji', 'File', 'ForceReply',\n 'InlineKeyboardButton', 'InlineKeyboardMarkup', 'InlineQuery', 'InlineQueryResult',\n 'InlineQueryResult', 'InlineQueryResultArticle', 'InlineQueryResultAudio',\n 'InlineQueryResultCachedAudio', 'InlineQueryResultCachedDocument',\n 'InlineQueryResultCachedGif', 
'InlineQueryResultCachedMpeg4Gif',\n 'InlineQueryResultCachedPhoto', 'InlineQueryResultCachedSticker',\n 'InlineQueryResultCachedVideo', 'InlineQueryResultCachedVoice',\n 'InlineQueryResultContact', 'InlineQueryResultDocument', 'InlineQueryResultGif',\n 'InlineQueryResultLocation', 'InlineQueryResultMpeg4Gif', 'InlineQueryResultPhoto',\n 'InlineQueryResultVenue', 'InlineQueryResultVideo', 'InlineQueryResultVoice',\n 'InputContactMessageContent', 'InputFile', 'InputLocationMessageContent',\n 'InputMessageContent', 'InputTextMessageContent', 'InputVenueMessageContent',\n 'KeyboardButton', 'Location', 'Message', 'MessageEntity', 'NullHandler', 'ParseMode',\n 'PhotoSize', 'ReplyKeyboardHide', 'ReplyKeyboardMarkup', 'ReplyMarkup', 'Sticker',\n 'TelegramError', 'TelegramObject', 'Update', 'User', 'UserProfilePhotos', 'Venue',\n 'Video', 'Voice']\n\nif version_info < (2, 7):\n from warnings import warn\n warn(\"python-telegram-bot will stop supporting Python 2.6 in a future release. \"\n \"Please upgrade your Python version to at least Python 2.7!\")\n", "path": "telegram/__init__.py"}]} | 3,873 | 938 |
gh_patches_debug_36419 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-8552 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
house_au: split off new brand for House Bed & Bath
house_au captures two brands:
* [House](https://www.wikidata.org/wiki/Q117921987)
* House Bed & Bath (https://www.wikidata.org/wiki/Q126176210)
Currently the spider doesn't differentiate the two brands. It should though.
Reference: https://globalretailbrands.net/
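Both wikidata IDs are quoted above; below is a sketch of how the spider could split the two brands by store name prefix (store names such as "House Bed & Bath Chadstone" vs "House Chadstone" are an assumption about the API response, not verified here):

```python
# Illustrative only: pick brand tags from the store name prefix.
BRANDS = {
    "House Bed & Bath": {"brand": "House Bed & Bath", "brand_wikidata": "Q126176210"},
    "House": {"brand": "House", "brand_wikidata": "Q117921987"},
}

def brand_for(store_name: str) -> dict:
    for prefix, tags in BRANDS.items():  # longer prefix is listed (and checked) first
        if store_name.startswith(prefix):
            return tags
    return BRANDS["House"]
```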
</issue>
<code>
[start of locations/spiders/house_au.py]
1 import reverse_geocoder
2 from scrapy import Request, Spider
3
4 from locations.categories import Categories
5 from locations.dict_parser import DictParser
6 from locations.hours import OpeningHours
7 from locations.pipelines.address_clean_up import clean_address
8
9
10 class HouseAUSpider(Spider):
11 name = "house_au"
12 item_attributes = {
13 "brand": "House",
14 "brand_wikidata": "Q117921987",
15 "extras": Categories.SHOP_HOUSEWARE.value,
16 }
17 allowed_domains = ["www.house.com.au"]
18 start_urls = ["https://www.house.com.au/api/get-stores"]
19
20 def start_requests(self):
21 for url in self.start_urls:
22 yield Request(url=url, method="POST")
23
24 def parse(self, response):
25 for location in response.json():
26 item = DictParser.parse(location)
27
28 # Some stores have wildly incorrect coordinates for
29 # locations as far away as France. Only add geometry
30 # where coordinates existing within Australia.
31 if result := reverse_geocoder.get((location["latitude"], location["longitude"]), mode=1, verbose=False):
32 if result["cc"] == "AU":
33 item["geometry"] = location["location"]
34
35 item["street_address"] = clean_address([location["address1"], location["address2"]])
36 item["website"] = "https://www.house.com.au/stores/" + location["slug"]
37 item["opening_hours"] = OpeningHours()
38 for day_name, hours in location["storeHours"].items():
39 if hours["open"] == "-" or hours["close"] == "-" or hours["close"] == "17:3016:00":
40 continue
41 item["opening_hours"].add_range(
42 day_name.title(),
43 hours["open"].replace(".", ":"),
44 hours["close"].replace(".", ":").replace(":-", ":"),
45 )
46 yield item
47
[end of locations/spiders/house_au.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/house_au.py b/locations/spiders/house_au.py
--- a/locations/spiders/house_au.py
+++ b/locations/spiders/house_au.py
@@ -9,13 +9,16 @@
class HouseAUSpider(Spider):
name = "house_au"
- item_attributes = {
- "brand": "House",
- "brand_wikidata": "Q117921987",
- "extras": Categories.SHOP_HOUSEWARE.value,
- }
allowed_domains = ["www.house.com.au"]
start_urls = ["https://www.house.com.au/api/get-stores"]
+ brands = {
+ "House Bed & Bath": {
+ "brand": "House Bed & Bath",
+ "brand_wikidata": "",
+ "extras": Categories.SHOP_HOUSEHOLD_LINEN.value,
+ },
+ "House": {"brand": "House", "brand_wikidata": "Q117921987", "extras": Categories.SHOP_HOUSEWARE.value},
+ }
def start_requests(self):
for url in self.start_urls:
@@ -25,6 +28,12 @@
for location in response.json():
item = DictParser.parse(location)
+ for brand_name in self.brands.keys():
+ if item["name"].startswith(f"{brand_name} "):
+ item.update(self.brands[brand_name])
+ item["branch"] = item["name"].replace(f"{brand_name} ", "")
+ break
+
# Some stores have wildly incorrect coordinates for
# locations as far away as France. Only add geometry
# where coordinates existing within Australia.
@@ -34,6 +43,7 @@
item["street_address"] = clean_address([location["address1"], location["address2"]])
item["website"] = "https://www.house.com.au/stores/" + location["slug"]
+
item["opening_hours"] = OpeningHours()
for day_name, hours in location["storeHours"].items():
if hours["open"] == "-" or hours["close"] == "-" or hours["close"] == "17:3016:00":
@@ -43,4 +53,5 @@
hours["open"].replace(".", ":"),
hours["close"].replace(".", ":").replace(":-", ":"),
)
+
yield item
| {"golden_diff": "diff --git a/locations/spiders/house_au.py b/locations/spiders/house_au.py\n--- a/locations/spiders/house_au.py\n+++ b/locations/spiders/house_au.py\n@@ -9,13 +9,16 @@\n \n class HouseAUSpider(Spider):\n name = \"house_au\"\n- item_attributes = {\n- \"brand\": \"House\",\n- \"brand_wikidata\": \"Q117921987\",\n- \"extras\": Categories.SHOP_HOUSEWARE.value,\n- }\n allowed_domains = [\"www.house.com.au\"]\n start_urls = [\"https://www.house.com.au/api/get-stores\"]\n+ brands = {\n+ \"House Bed & Bath\": {\n+ \"brand\": \"House Bed & Bath\",\n+ \"brand_wikidata\": \"\",\n+ \"extras\": Categories.SHOP_HOUSEHOLD_LINEN.value,\n+ },\n+ \"House\": {\"brand\": \"House\", \"brand_wikidata\": \"Q117921987\", \"extras\": Categories.SHOP_HOUSEWARE.value},\n+ }\n \n def start_requests(self):\n for url in self.start_urls:\n@@ -25,6 +28,12 @@\n for location in response.json():\n item = DictParser.parse(location)\n \n+ for brand_name in self.brands.keys():\n+ if item[\"name\"].startswith(f\"{brand_name} \"):\n+ item.update(self.brands[brand_name])\n+ item[\"branch\"] = item[\"name\"].replace(f\"{brand_name} \", \"\")\n+ break\n+\n # Some stores have wildly incorrect coordinates for\n # locations as far away as France. Only add geometry\n # where coordinates existing within Australia.\n@@ -34,6 +43,7 @@\n \n item[\"street_address\"] = clean_address([location[\"address1\"], location[\"address2\"]])\n item[\"website\"] = \"https://www.house.com.au/stores/\" + location[\"slug\"]\n+\n item[\"opening_hours\"] = OpeningHours()\n for day_name, hours in location[\"storeHours\"].items():\n if hours[\"open\"] == \"-\" or hours[\"close\"] == \"-\" or hours[\"close\"] == \"17:3016:00\":\n@@ -43,4 +53,5 @@\n hours[\"open\"].replace(\".\", \":\"),\n hours[\"close\"].replace(\".\", \":\").replace(\":-\", \":\"),\n )\n+\n yield item\n", "issue": "house_au: split off new brand for House Bed & Bath\nhouse_au captures two brands:\r\n* [House](https://www.wikidata.org/wiki/Q117921987)\r\n* House Bed & Bath (https://www.wikidata.org/wiki/Q126176210)\r\n\r\nCurrently the spider doesn't differentiate the two brands. It should though.\r\n\r\nReference: https://globalretailbrands.net/\n", "before_files": [{"content": "import reverse_geocoder\nfrom scrapy import Request, Spider\n\nfrom locations.categories import Categories\nfrom locations.dict_parser import DictParser\nfrom locations.hours import OpeningHours\nfrom locations.pipelines.address_clean_up import clean_address\n\n\nclass HouseAUSpider(Spider):\n name = \"house_au\"\n item_attributes = {\n \"brand\": \"House\",\n \"brand_wikidata\": \"Q117921987\",\n \"extras\": Categories.SHOP_HOUSEWARE.value,\n }\n allowed_domains = [\"www.house.com.au\"]\n start_urls = [\"https://www.house.com.au/api/get-stores\"]\n\n def start_requests(self):\n for url in self.start_urls:\n yield Request(url=url, method=\"POST\")\n\n def parse(self, response):\n for location in response.json():\n item = DictParser.parse(location)\n\n # Some stores have wildly incorrect coordinates for\n # locations as far away as France. 
Only add geometry\n # where coordinates existing within Australia.\n if result := reverse_geocoder.get((location[\"latitude\"], location[\"longitude\"]), mode=1, verbose=False):\n if result[\"cc\"] == \"AU\":\n item[\"geometry\"] = location[\"location\"]\n\n item[\"street_address\"] = clean_address([location[\"address1\"], location[\"address2\"]])\n item[\"website\"] = \"https://www.house.com.au/stores/\" + location[\"slug\"]\n item[\"opening_hours\"] = OpeningHours()\n for day_name, hours in location[\"storeHours\"].items():\n if hours[\"open\"] == \"-\" or hours[\"close\"] == \"-\" or hours[\"close\"] == \"17:3016:00\":\n continue\n item[\"opening_hours\"].add_range(\n day_name.title(),\n hours[\"open\"].replace(\".\", \":\"),\n hours[\"close\"].replace(\".\", \":\").replace(\":-\", \":\"),\n )\n yield item\n", "path": "locations/spiders/house_au.py"}]} | 1,124 | 544 |
gh_patches_debug_27000 | rasdani/github-patches | git_diff | iterative__dvc-1808 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`dvc remote add` replaces the existing remote silently
The `dvc remote add` command ignores the existing remote and overwrites it silently.
```
→ dvc --version
0.32.1+7d7ed4
```
### To reproduce
```bash
dvc remote add s3 s3://bucket/subdir
dvc remote add s3 s3://bucket/subdir2
```
### Expected behavior
The second command `dvc remote add s3 s3://bucket/subdir2` should fail with the `Remote with name "s3" already exists` message.
### Current behavior
Remote URL is silently overwritten:
```bash
> cat .dvc/config
['remote "s3"']
url = s3://bucket/subdir2
```
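A minimal sketch of the existence check the expected behaviour implies (the section naming follows the `.dvc/config` output above; `config` here is a plain dict stand-in, not DVC's real config API):

```python
# Illustrative guard only, not DVC's implementation.
def add_remote(config, name, url):
    section = 'remote "{}"'.format(name)
    if section in config:
        raise RuntimeError('Remote with name "{}" already exists.'.format(name))
    config[section] = {"url": url}

config = {}
add_remote(config, "s3", "s3://bucket/subdir")
add_remote(config, "s3", "s3://bucket/subdir2")  # now fails instead of overwriting
```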
</issue>
<code>
[start of dvc/command/remote.py]
1 from __future__ import unicode_literals
2
3 import argparse
4 import os
5 import re
6
7 import dvc.logger as logger
8 from dvc.command.base import fix_subparsers, append_doc_link
9 from dvc.config import Config
10 from dvc.command.config import CmdConfig
11
12
13 class CmdRemoteAdd(CmdConfig):
14 @staticmethod
15 def resolve_path(path, config_file):
16 """Resolve path relative to config file location.
17
18 Args:
19 path: Path to be resolved.
20 config_file: Path to config file, which `path` is specified
21 relative to.
22
23 Returns:
24 Path relative to the `config_file` location. If `path` is an
25 absolute path then it will be returned without change.
26
27 """
28 if os.path.isabs(path):
29 return path
30 return os.path.relpath(path, os.path.dirname(config_file))
31
32 def run(self):
33 from dvc.remote import _get, RemoteLOCAL
34
35 remote = _get({Config.SECTION_REMOTE_URL: self.args.url})
36 if remote == RemoteLOCAL:
37 self.args.url = self.resolve_path(
38 self.args.url, self.configobj.filename
39 )
40
41 section = Config.SECTION_REMOTE_FMT.format(self.args.name)
42 ret = self._set(section, Config.SECTION_REMOTE_URL, self.args.url)
43 if ret != 0:
44 return ret
45
46 if self.args.default:
47 msg = "Setting '{}' as a default remote.".format(self.args.name)
48 logger.info(msg)
49 ret = self._set(
50 Config.SECTION_CORE, Config.SECTION_CORE_REMOTE, self.args.name
51 )
52
53 return ret
54
55
56 class CmdRemoteRemove(CmdConfig):
57 def _remove_default(self, config):
58 core = config.get(Config.SECTION_CORE, None)
59 if core is None:
60 return 0
61
62 default = core.get(Config.SECTION_CORE_REMOTE, None)
63 if default is None:
64 return 0
65
66 if default == self.args.name:
67 return self._unset(
68 Config.SECTION_CORE,
69 opt=Config.SECTION_CORE_REMOTE,
70 configobj=config,
71 )
72
73 def run(self):
74 section = Config.SECTION_REMOTE_FMT.format(self.args.name)
75 ret = self._unset(section)
76 if ret != 0:
77 return ret
78
79 for configobj in [
80 self.config._local_config,
81 self.config._repo_config,
82 self.config._global_config,
83 self.config._system_config,
84 ]:
85 self._remove_default(configobj)
86 self.config.save(configobj)
87 if configobj == self.configobj:
88 break
89
90 return 0
91
92
93 class CmdRemoteModify(CmdConfig):
94 def run(self):
95 section = Config.SECTION_REMOTE_FMT.format(self.args.name)
96 self.args.name = "{}.{}".format(section, self.args.option)
97 return super(CmdRemoteModify, self).run()
98
99
100 class CmdRemoteDefault(CmdConfig):
101 def run(self):
102 self.args.value = self.args.name
103 self.args.name = "core.remote"
104 return super(CmdRemoteDefault, self).run()
105
106
107 class CmdRemoteList(CmdConfig):
108 def run(self):
109 for section in self.configobj.keys():
110 r = re.match(Config.SECTION_REMOTE_REGEX, section)
111 if r:
112 name = r.group("name")
113 url = self.configobj[section].get(
114 Config.SECTION_REMOTE_URL, ""
115 )
116 logger.info("{}\t{}".format(name, url))
117 return 0
118
119
120 def add_parser(subparsers, parent_parser):
121 from dvc.command.config import parent_config_parser
122
123 REMOTE_HELP = "Manage remote storage configuration."
124 remote_parser = subparsers.add_parser(
125 "remote",
126 parents=[parent_parser],
127 description=append_doc_link(REMOTE_HELP, "remote"),
128 help=REMOTE_HELP,
129 formatter_class=argparse.RawDescriptionHelpFormatter,
130 )
131
132 remote_subparsers = remote_parser.add_subparsers(
133 dest="cmd",
134 help="Use dvc remote CMD --help for " "command-specific help.",
135 )
136
137 fix_subparsers(remote_subparsers)
138
139 REMOTE_ADD_HELP = "Add remote."
140 remote_add_parser = remote_subparsers.add_parser(
141 "add",
142 parents=[parent_config_parser, parent_parser],
143 description=append_doc_link(REMOTE_ADD_HELP, "remote-add"),
144 help=REMOTE_ADD_HELP,
145 formatter_class=argparse.RawDescriptionHelpFormatter,
146 )
147 remote_add_parser.add_argument("name", help="Name.")
148 remote_add_parser.add_argument(
149 "url",
150 help="URL. See full list of supported urls at " "man.dvc.org/remote",
151 )
152 remote_add_parser.add_argument(
153 "-d",
154 "--default",
155 action="store_true",
156 default=False,
157 help="Set as default remote.",
158 )
159 remote_add_parser.set_defaults(func=CmdRemoteAdd)
160
161 REMOTE_DEFAULT_HELP = "Set/unset default remote."
162 remote_default_parser = remote_subparsers.add_parser(
163 "default",
164 parents=[parent_config_parser, parent_parser],
165 description=append_doc_link(REMOTE_DEFAULT_HELP, "remote-default"),
166 help=REMOTE_DEFAULT_HELP,
167 formatter_class=argparse.RawDescriptionHelpFormatter,
168 )
169 remote_default_parser.add_argument(
170 "name", nargs="?", help="Name of the remote."
171 )
172 remote_default_parser.add_argument(
173 "-u",
174 "--unset",
175 action="store_true",
176 default=False,
177 help="Unset default remote.",
178 )
179 remote_default_parser.set_defaults(func=CmdRemoteDefault)
180
181 REMOTE_REMOVE_HELP = "Remove remote."
182 remote_remove_parser = remote_subparsers.add_parser(
183 "remove",
184 parents=[parent_config_parser, parent_parser],
185 description=append_doc_link(REMOTE_REMOVE_HELP, "remote-remove"),
186 help=REMOTE_REMOVE_HELP,
187 formatter_class=argparse.RawDescriptionHelpFormatter,
188 )
189 remote_remove_parser.add_argument("name", help="Name")
190 remote_remove_parser.set_defaults(func=CmdRemoteRemove)
191
192 REMOTE_MODIFY_HELP = "Modify remote."
193 remote_modify_parser = remote_subparsers.add_parser(
194 "modify",
195 parents=[parent_config_parser, parent_parser],
196 description=append_doc_link(REMOTE_MODIFY_HELP, "remote-modify"),
197 help=REMOTE_MODIFY_HELP,
198 formatter_class=argparse.RawDescriptionHelpFormatter,
199 )
200 remote_modify_parser.add_argument("name", help="Name.")
201 remote_modify_parser.add_argument("option", help="Option.")
202 remote_modify_parser.add_argument("value", nargs="?", help="Value.")
203 remote_modify_parser.add_argument(
204 "-u",
205 "--unset",
206 default=False,
207 action="store_true",
208 help="Unset option.",
209 )
210 remote_modify_parser.set_defaults(func=CmdRemoteModify)
211
212 REMOTE_LIST_HELP = "List available remotes."
213 remote_list_parser = remote_subparsers.add_parser(
214 "list",
215 parents=[parent_config_parser, parent_parser],
216 description=append_doc_link(REMOTE_LIST_HELP, "remote-list"),
217 help=REMOTE_LIST_HELP,
218 formatter_class=argparse.RawDescriptionHelpFormatter,
219 )
220 remote_list_parser.set_defaults(func=CmdRemoteList)
221
[end of dvc/command/remote.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dvc/command/remote.py b/dvc/command/remote.py
--- a/dvc/command/remote.py
+++ b/dvc/command/remote.py
@@ -5,9 +5,9 @@
import re
import dvc.logger as logger
-from dvc.command.base import fix_subparsers, append_doc_link
-from dvc.config import Config
+from dvc.command.base import append_doc_link, fix_subparsers
from dvc.command.config import CmdConfig
+from dvc.config import Config
class CmdRemoteAdd(CmdConfig):
@@ -39,6 +39,14 @@
)
section = Config.SECTION_REMOTE_FMT.format(self.args.name)
+ if (section in self.configobj.keys()) and not self.args.force:
+ logger.error(
+ "Remote with name {} already exists. "
+ "Use -f (--force) to overwrite remote "
+ "with new value".format(self.args.name)
+ )
+ return 1
+
ret = self._set(section, Config.SECTION_REMOTE_URL, self.args.url)
if ret != 0:
return ret
@@ -156,6 +164,13 @@
default=False,
help="Set as default remote.",
)
+ remote_add_parser.add_argument(
+ "-f",
+ "--force",
+ action="store_true",
+ default=False,
+ help="Force overwriting existing configs",
+ )
remote_add_parser.set_defaults(func=CmdRemoteAdd)
REMOTE_DEFAULT_HELP = "Set/unset default remote."
| {"golden_diff": "diff --git a/dvc/command/remote.py b/dvc/command/remote.py\n--- a/dvc/command/remote.py\n+++ b/dvc/command/remote.py\n@@ -5,9 +5,9 @@\n import re\n \n import dvc.logger as logger\n-from dvc.command.base import fix_subparsers, append_doc_link\n-from dvc.config import Config\n+from dvc.command.base import append_doc_link, fix_subparsers\n from dvc.command.config import CmdConfig\n+from dvc.config import Config\n \n \n class CmdRemoteAdd(CmdConfig):\n@@ -39,6 +39,14 @@\n )\n \n section = Config.SECTION_REMOTE_FMT.format(self.args.name)\n+ if (section in self.configobj.keys()) and not self.args.force:\n+ logger.error(\n+ \"Remote with name {} already exists. \"\n+ \"Use -f (--force) to overwrite remote \"\n+ \"with new value\".format(self.args.name)\n+ )\n+ return 1\n+\n ret = self._set(section, Config.SECTION_REMOTE_URL, self.args.url)\n if ret != 0:\n return ret\n@@ -156,6 +164,13 @@\n default=False,\n help=\"Set as default remote.\",\n )\n+ remote_add_parser.add_argument(\n+ \"-f\",\n+ \"--force\",\n+ action=\"store_true\",\n+ default=False,\n+ help=\"Force overwriting existing configs\",\n+ )\n remote_add_parser.set_defaults(func=CmdRemoteAdd)\n \n REMOTE_DEFAULT_HELP = \"Set/unset default remote.\"\n", "issue": "`dvc remote add` replaces the existing remote silently\nThe `dvc remote add` command ignores the existing remote and overwrites it silently. \r\n```\r\n\u2192 dvc --version\r\n0.32.1+7d7ed4\r\n```\r\n\r\n### To reproduce\r\n\r\n```bash\r\ndvc remote add s3 s3://bucket/subdir\r\ndvc remote add s3 s3://bucket/subdir2\r\n```\r\n\r\n### Expected behavior\r\n\r\nThe second command `dvc remote add s3 s3://bucket/subdir2` should fail with the `Remote with name \"s3\" already exists` message.\r\n\r\n### Current behavior\r\n\r\nRemote URL is silently overwriten:\r\n```bash\r\n> cat .dvc/config\r\n['remote \"s3\"']\r\nurl = s3://bucket/subdir2\r\n```\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport argparse\nimport os\nimport re\n\nimport dvc.logger as logger\nfrom dvc.command.base import fix_subparsers, append_doc_link\nfrom dvc.config import Config\nfrom dvc.command.config import CmdConfig\n\n\nclass CmdRemoteAdd(CmdConfig):\n @staticmethod\n def resolve_path(path, config_file):\n \"\"\"Resolve path relative to config file location.\n\n Args:\n path: Path to be resolved.\n config_file: Path to config file, which `path` is specified\n relative to.\n\n Returns:\n Path relative to the `config_file` location. 
If `path` is an\n absolute path then it will be returned without change.\n\n \"\"\"\n if os.path.isabs(path):\n return path\n return os.path.relpath(path, os.path.dirname(config_file))\n\n def run(self):\n from dvc.remote import _get, RemoteLOCAL\n\n remote = _get({Config.SECTION_REMOTE_URL: self.args.url})\n if remote == RemoteLOCAL:\n self.args.url = self.resolve_path(\n self.args.url, self.configobj.filename\n )\n\n section = Config.SECTION_REMOTE_FMT.format(self.args.name)\n ret = self._set(section, Config.SECTION_REMOTE_URL, self.args.url)\n if ret != 0:\n return ret\n\n if self.args.default:\n msg = \"Setting '{}' as a default remote.\".format(self.args.name)\n logger.info(msg)\n ret = self._set(\n Config.SECTION_CORE, Config.SECTION_CORE_REMOTE, self.args.name\n )\n\n return ret\n\n\nclass CmdRemoteRemove(CmdConfig):\n def _remove_default(self, config):\n core = config.get(Config.SECTION_CORE, None)\n if core is None:\n return 0\n\n default = core.get(Config.SECTION_CORE_REMOTE, None)\n if default is None:\n return 0\n\n if default == self.args.name:\n return self._unset(\n Config.SECTION_CORE,\n opt=Config.SECTION_CORE_REMOTE,\n configobj=config,\n )\n\n def run(self):\n section = Config.SECTION_REMOTE_FMT.format(self.args.name)\n ret = self._unset(section)\n if ret != 0:\n return ret\n\n for configobj in [\n self.config._local_config,\n self.config._repo_config,\n self.config._global_config,\n self.config._system_config,\n ]:\n self._remove_default(configobj)\n self.config.save(configobj)\n if configobj == self.configobj:\n break\n\n return 0\n\n\nclass CmdRemoteModify(CmdConfig):\n def run(self):\n section = Config.SECTION_REMOTE_FMT.format(self.args.name)\n self.args.name = \"{}.{}\".format(section, self.args.option)\n return super(CmdRemoteModify, self).run()\n\n\nclass CmdRemoteDefault(CmdConfig):\n def run(self):\n self.args.value = self.args.name\n self.args.name = \"core.remote\"\n return super(CmdRemoteDefault, self).run()\n\n\nclass CmdRemoteList(CmdConfig):\n def run(self):\n for section in self.configobj.keys():\n r = re.match(Config.SECTION_REMOTE_REGEX, section)\n if r:\n name = r.group(\"name\")\n url = self.configobj[section].get(\n Config.SECTION_REMOTE_URL, \"\"\n )\n logger.info(\"{}\\t{}\".format(name, url))\n return 0\n\n\ndef add_parser(subparsers, parent_parser):\n from dvc.command.config import parent_config_parser\n\n REMOTE_HELP = \"Manage remote storage configuration.\"\n remote_parser = subparsers.add_parser(\n \"remote\",\n parents=[parent_parser],\n description=append_doc_link(REMOTE_HELP, \"remote\"),\n help=REMOTE_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n\n remote_subparsers = remote_parser.add_subparsers(\n dest=\"cmd\",\n help=\"Use dvc remote CMD --help for \" \"command-specific help.\",\n )\n\n fix_subparsers(remote_subparsers)\n\n REMOTE_ADD_HELP = \"Add remote.\"\n remote_add_parser = remote_subparsers.add_parser(\n \"add\",\n parents=[parent_config_parser, parent_parser],\n description=append_doc_link(REMOTE_ADD_HELP, \"remote-add\"),\n help=REMOTE_ADD_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n remote_add_parser.add_argument(\"name\", help=\"Name.\")\n remote_add_parser.add_argument(\n \"url\",\n help=\"URL. 
See full list of supported urls at \" \"man.dvc.org/remote\",\n )\n remote_add_parser.add_argument(\n \"-d\",\n \"--default\",\n action=\"store_true\",\n default=False,\n help=\"Set as default remote.\",\n )\n remote_add_parser.set_defaults(func=CmdRemoteAdd)\n\n REMOTE_DEFAULT_HELP = \"Set/unset default remote.\"\n remote_default_parser = remote_subparsers.add_parser(\n \"default\",\n parents=[parent_config_parser, parent_parser],\n description=append_doc_link(REMOTE_DEFAULT_HELP, \"remote-default\"),\n help=REMOTE_DEFAULT_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n remote_default_parser.add_argument(\n \"name\", nargs=\"?\", help=\"Name of the remote.\"\n )\n remote_default_parser.add_argument(\n \"-u\",\n \"--unset\",\n action=\"store_true\",\n default=False,\n help=\"Unset default remote.\",\n )\n remote_default_parser.set_defaults(func=CmdRemoteDefault)\n\n REMOTE_REMOVE_HELP = \"Remove remote.\"\n remote_remove_parser = remote_subparsers.add_parser(\n \"remove\",\n parents=[parent_config_parser, parent_parser],\n description=append_doc_link(REMOTE_REMOVE_HELP, \"remote-remove\"),\n help=REMOTE_REMOVE_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n remote_remove_parser.add_argument(\"name\", help=\"Name\")\n remote_remove_parser.set_defaults(func=CmdRemoteRemove)\n\n REMOTE_MODIFY_HELP = \"Modify remote.\"\n remote_modify_parser = remote_subparsers.add_parser(\n \"modify\",\n parents=[parent_config_parser, parent_parser],\n description=append_doc_link(REMOTE_MODIFY_HELP, \"remote-modify\"),\n help=REMOTE_MODIFY_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n remote_modify_parser.add_argument(\"name\", help=\"Name.\")\n remote_modify_parser.add_argument(\"option\", help=\"Option.\")\n remote_modify_parser.add_argument(\"value\", nargs=\"?\", help=\"Value.\")\n remote_modify_parser.add_argument(\n \"-u\",\n \"--unset\",\n default=False,\n action=\"store_true\",\n help=\"Unset option.\",\n )\n remote_modify_parser.set_defaults(func=CmdRemoteModify)\n\n REMOTE_LIST_HELP = \"List available remotes.\"\n remote_list_parser = remote_subparsers.add_parser(\n \"list\",\n parents=[parent_config_parser, parent_parser],\n description=append_doc_link(REMOTE_LIST_HELP, \"remote-list\"),\n help=REMOTE_LIST_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n remote_list_parser.set_defaults(func=CmdRemoteList)\n", "path": "dvc/command/remote.py"}]} | 2,792 | 342 |
gh_patches_debug_7899 | rasdani/github-patches | git_diff | cloudtools__troposphere-1692 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
implement AWS::CodeStarConnections changes from May 14, 2020 update
</issue>
<code>
[start of troposphere/codestarconnections.py]
1 # Copyright (c) 2012-2020, Mark Peek <[email protected]>
2 # All rights reserved.
3 #
4 # See LICENSE file for full license.
5
6
7 from . import AWSObject
8
9
10 VALID_CONNECTION_PROVIDERTYPE = ('Bitbucket')
11
12
13 def validate_connection_providertype(connection_providertype):
14 """Validate ProviderType for Connection"""
15
16 if connection_providertype not in VALID_CONNECTION_PROVIDERTYPE:
17 raise ValueError("Connection ProviderType must be one of: %s" %
18 ", ".join(VALID_CONNECTION_PROVIDERTYPE))
19 return connection_providertype
20
21
22 class Connection(AWSObject):
23 resource_type = "AWS::CodeStarConnections::Connection"
24
25 props = {
26 'ConnectionName': (basestring, True),
27 'ProviderType': (validate_connection_providertype, True),
28 }
29
[end of troposphere/codestarconnections.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/troposphere/codestarconnections.py b/troposphere/codestarconnections.py
--- a/troposphere/codestarconnections.py
+++ b/troposphere/codestarconnections.py
@@ -4,7 +4,7 @@
# See LICENSE file for full license.
-from . import AWSObject
+from . import AWSObject, Tags
VALID_CONNECTION_PROVIDERTYPE = ('Bitbucket')
@@ -25,4 +25,5 @@
props = {
'ConnectionName': (basestring, True),
'ProviderType': (validate_connection_providertype, True),
+ 'Tags': (Tags, False),
}
| {"golden_diff": "diff --git a/troposphere/codestarconnections.py b/troposphere/codestarconnections.py\n--- a/troposphere/codestarconnections.py\n+++ b/troposphere/codestarconnections.py\n@@ -4,7 +4,7 @@\n # See LICENSE file for full license.\n \n \n-from . import AWSObject\n+from . import AWSObject, Tags\n \n \n VALID_CONNECTION_PROVIDERTYPE = ('Bitbucket')\n@@ -25,4 +25,5 @@\n props = {\n 'ConnectionName': (basestring, True),\n 'ProviderType': (validate_connection_providertype, True),\n+ 'Tags': (Tags, False),\n }\n", "issue": "implement AWS::CodeStarConnections changes from May 14, 2020 update\n\n", "before_files": [{"content": "# Copyright (c) 2012-2020, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\n\nfrom . import AWSObject\n\n\nVALID_CONNECTION_PROVIDERTYPE = ('Bitbucket')\n\n\ndef validate_connection_providertype(connection_providertype):\n \"\"\"Validate ProviderType for Connection\"\"\"\n\n if connection_providertype not in VALID_CONNECTION_PROVIDERTYPE:\n raise ValueError(\"Connection ProviderType must be one of: %s\" %\n \", \".join(VALID_CONNECTION_PROVIDERTYPE))\n return connection_providertype\n\n\nclass Connection(AWSObject):\n resource_type = \"AWS::CodeStarConnections::Connection\"\n\n props = {\n 'ConnectionName': (basestring, True),\n 'ProviderType': (validate_connection_providertype, True),\n }\n", "path": "troposphere/codestarconnections.py"}]} | 789 | 142 |
gh_patches_debug_1243 | rasdani/github-patches | git_diff | iterative__dvc-3828 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
End of file fixer
I am using an [end-of-file fixer in the pre-commit hook](https://pre-commit.com/hooks.html). It checks that the file ends with an empty new line.
It looks like files
```
modified: .dvc/plots/confusion.json
modified: .dvc/plots/default.json
modified: .dvc/plots/scatter.json
```
that are automatically created by `dvc init` do not have an empty line at the end of the file.
</issue>
<code>
[start of dvc/repo/plots/template.py]
1 import json
2 import logging
3 import os
4 import re
5
6 from funcy import cached_property
7
8 from dvc.exceptions import DvcException
9 from dvc.utils.fs import makedirs
10
11 logger = logging.getLogger(__name__)
12
13
14 class TemplateNotFoundError(DvcException):
15 def __init__(self, path):
16 super().__init__(f"Template '{path}' not found.")
17
18
19 class NoDataForTemplateError(DvcException):
20 def __init__(self, template_path):
21 super().__init__(
22 "No data provided for '{}'.".format(os.path.relpath(template_path))
23 )
24
25
26 class NoFieldInDataError(DvcException):
27 def __init__(self, field_name):
28 super().__init__(
29 f"Field '{field_name}' does not exist in provided data."
30 )
31
32
33 class Template:
34 INDENT = 4
35 SEPARATORS = (",", ": ")
36 EXTENSION = ".json"
37 METRIC_DATA_ANCHOR = "<DVC_METRIC_DATA>"
38 X_ANCHOR = "<DVC_METRIC_X>"
39 Y_ANCHOR = "<DVC_METRIC_Y>"
40 TITLE_ANCHOR = "<DVC_METRIC_TITLE>"
41 X_TITLE_ANCHOR = "<DVC_METRIC_X_TITLE>"
42 Y_TITLE_ANCHOR = "<DVC_METRIC_Y_TITLE>"
43
44 def __init__(self, templates_dir):
45 self.plot_templates_dir = templates_dir
46
47 def dump(self):
48 makedirs(self.plot_templates_dir, exist_ok=True)
49
50 with open(
51 os.path.join(
52 self.plot_templates_dir, self.TEMPLATE_NAME + self.EXTENSION
53 ),
54 "w",
55 ) as fobj:
56 json.dump(
57 self.DEFAULT_CONTENT,
58 fobj,
59 indent=self.INDENT,
60 separators=self.SEPARATORS,
61 )
62
63 @staticmethod
64 def get_data_anchor(template_content):
65 regex = re.compile('"<DVC_METRIC_DATA[^>"]*>"')
66 return regex.findall(template_content)
67
68 @staticmethod
69 def parse_data_anchors(template_content):
70 data_files = {
71 Template.get_datafile(m)
72 for m in Template.get_data_anchor(template_content)
73 }
74 return {df for df in data_files if df}
75
76 @staticmethod
77 def get_datafile(anchor_string):
78 return (
79 anchor_string.replace("<", "")
80 .replace(">", "")
81 .replace('"', "")
82 .replace("DVC_METRIC_DATA", "")
83 .replace(",", "")
84 )
85
86 @staticmethod
87 def fill(
88 template_path,
89 data,
90 priority_datafile=None,
91 x_field=None,
92 y_field=None,
93 title=None,
94 x_title=None,
95 y_title=None,
96 ):
97 with open(template_path) as fobj:
98 result_content = fobj.read()
99
100 if x_field:
101 Template._check_field_exists(data, x_field)
102 if y_field:
103 Template._check_field_exists(data, y_field)
104
105 result_content = Template._replace_data_anchors(
106 result_content, data, priority_datafile
107 )
108
109 result_content = Template._replace_metadata_anchors(
110 result_content, title, x_field, x_title, y_field, y_title
111 )
112
113 return result_content
114
115 @staticmethod
116 def _check_field_exists(data, field):
117 for file, data_points in data.items():
118 if not any(
119 field in data_point.keys() for data_point in data_points
120 ):
121 raise NoFieldInDataError(field)
122
123 @staticmethod
124 def _replace_metadata_anchors(
125 result_content, title, x_field, x_title, y_field, y_title
126 ):
127 if Template.TITLE_ANCHOR in result_content:
128 if title:
129 result_content = result_content.replace(
130 Template.TITLE_ANCHOR, title
131 )
132 else:
133 result_content = result_content.replace(
134 Template.TITLE_ANCHOR, ""
135 )
136 if Template.X_ANCHOR in result_content and x_field:
137 result_content = result_content.replace(Template.X_ANCHOR, x_field)
138 if Template.Y_ANCHOR in result_content and y_field:
139 result_content = result_content.replace(Template.Y_ANCHOR, y_field)
140 if Template.X_TITLE_ANCHOR in result_content:
141 if not x_title and x_field:
142 x_title = x_field
143 result_content = result_content.replace(
144 Template.X_TITLE_ANCHOR, x_title
145 )
146 if Template.Y_TITLE_ANCHOR in result_content:
147 if not y_title and y_field:
148 y_title = y_field
149 result_content = result_content.replace(
150 Template.Y_TITLE_ANCHOR, y_title
151 )
152 return result_content
153
154 @staticmethod
155 def _replace_data_anchors(result_content, data, priority_datafile):
156 for anchor in Template.get_data_anchor(result_content):
157 file = Template.get_datafile(anchor)
158
159 if not file or priority_datafile:
160 key = priority_datafile
161 else:
162 key = file
163
164 result_content = result_content.replace(
165 anchor,
166 json.dumps(
167 data[key],
168 indent=Template.INDENT,
169 separators=Template.SEPARATORS,
170 sort_keys=True,
171 ),
172 )
173 return result_content
174
175
176 class DefaultLinearTemplate(Template):
177 TEMPLATE_NAME = "default"
178
179 DEFAULT_CONTENT = {
180 "$schema": "https://vega.github.io/schema/vega-lite/v4.json",
181 "data": {"values": Template.METRIC_DATA_ANCHOR},
182 "title": Template.TITLE_ANCHOR,
183 "mark": {"type": "line"},
184 "encoding": {
185 "x": {
186 "field": Template.X_ANCHOR,
187 "type": "quantitative",
188 "title": Template.X_TITLE_ANCHOR,
189 },
190 "y": {
191 "field": Template.Y_ANCHOR,
192 "type": "quantitative",
193 "title": Template.Y_TITLE_ANCHOR,
194 "scale": {"zero": False},
195 },
196 "color": {"field": "rev", "type": "nominal"},
197 },
198 }
199
200
201 class DefaultConfusionTemplate(Template):
202 TEMPLATE_NAME = "confusion"
203 DEFAULT_CONTENT = {
204 "$schema": "https://vega.github.io/schema/vega-lite/v4.json",
205 "data": {"values": Template.METRIC_DATA_ANCHOR},
206 "title": Template.TITLE_ANCHOR,
207 "mark": "rect",
208 "encoding": {
209 "x": {
210 "field": Template.X_ANCHOR,
211 "type": "nominal",
212 "sort": "ascending",
213 "title": Template.X_TITLE_ANCHOR,
214 },
215 "y": {
216 "field": Template.Y_ANCHOR,
217 "type": "nominal",
218 "sort": "ascending",
219 "title": Template.Y_TITLE_ANCHOR,
220 },
221 "color": {"aggregate": "count", "type": "quantitative"},
222 "facet": {"field": "rev", "type": "nominal"},
223 },
224 }
225
226
227 class DefaultScatterTemplate(Template):
228 TEMPLATE_NAME = "scatter"
229 DEFAULT_CONTENT = {
230 "$schema": "https://vega.github.io/schema/vega-lite/v4.json",
231 "data": {"values": Template.METRIC_DATA_ANCHOR},
232 "title": Template.TITLE_ANCHOR,
233 "mark": "point",
234 "encoding": {
235 "x": {
236 "field": Template.X_ANCHOR,
237 "type": "quantitative",
238 "title": Template.X_TITLE_ANCHOR,
239 },
240 "y": {
241 "field": Template.Y_ANCHOR,
242 "type": "quantitative",
243 "title": Template.Y_TITLE_ANCHOR,
244 "scale": {"zero": False},
245 },
246 "color": {"field": "rev", "type": "nominal"},
247 },
248 }
249
250
251 class PlotTemplates:
252 TEMPLATES_DIR = "plots"
253 TEMPLATES = [
254 DefaultLinearTemplate,
255 DefaultConfusionTemplate,
256 DefaultScatterTemplate,
257 ]
258
259 @cached_property
260 def templates_dir(self):
261 return os.path.join(self.dvc_dir, self.TEMPLATES_DIR)
262
263 @cached_property
264 def default_template(self):
265 default_plot_path = os.path.join(self.templates_dir, "default.json")
266 if not os.path.exists(default_plot_path):
267 raise TemplateNotFoundError(os.path.relpath(default_plot_path))
268 return default_plot_path
269
270 def get_template(self, path):
271 t_path = os.path.join(self.templates_dir, path)
272 if os.path.exists(t_path):
273 return t_path
274
275 all_templates = [
276 os.path.join(root, file)
277 for root, _, files in os.walk(self.templates_dir)
278 for file in files
279 ]
280 matches = [
281 template
282 for template in all_templates
283 if os.path.splitext(template)[0] == t_path
284 ]
285 if matches:
286 assert len(matches) == 1
287 return matches[0]
288
289 raise TemplateNotFoundError(path)
290
291 def __init__(self, dvc_dir):
292 self.dvc_dir = dvc_dir
293
294 if not os.path.exists(self.templates_dir):
295 makedirs(self.templates_dir, exist_ok=True)
296 for t in self.TEMPLATES:
297 t(self.templates_dir).dump()
298
[end of dvc/repo/plots/template.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dvc/repo/plots/template.py b/dvc/repo/plots/template.py
--- a/dvc/repo/plots/template.py
+++ b/dvc/repo/plots/template.py
@@ -59,6 +59,7 @@
indent=self.INDENT,
separators=self.SEPARATORS,
)
+ fobj.write("\n")
@staticmethod
def get_data_anchor(template_content):
| {"golden_diff": "diff --git a/dvc/repo/plots/template.py b/dvc/repo/plots/template.py\n--- a/dvc/repo/plots/template.py\n+++ b/dvc/repo/plots/template.py\n@@ -59,6 +59,7 @@\n indent=self.INDENT,\n separators=self.SEPARATORS,\n )\n+ fobj.write(\"\\n\")\n \n @staticmethod\n def get_data_anchor(template_content):\n", "issue": "End of file fixer\nI am using an [end-of-file fixer in the pre-commit hook](https://pre-commit.com/hooks.html). It checks that the file ends with an empty new line.\r\n\r\nIt looks like files\r\n\r\n```\r\n\tmodified: .dvc/plots/confusion.json\r\n\tmodified: .dvc/plots/default.json\r\n\tmodified: .dvc/plots/scatter.json\r\n\r\n```\r\n\r\nThat are automatically created by `dvc init` do not have an empty line at the end of the file.\n", "before_files": [{"content": "import json\nimport logging\nimport os\nimport re\n\nfrom funcy import cached_property\n\nfrom dvc.exceptions import DvcException\nfrom dvc.utils.fs import makedirs\n\nlogger = logging.getLogger(__name__)\n\n\nclass TemplateNotFoundError(DvcException):\n def __init__(self, path):\n super().__init__(f\"Template '{path}' not found.\")\n\n\nclass NoDataForTemplateError(DvcException):\n def __init__(self, template_path):\n super().__init__(\n \"No data provided for '{}'.\".format(os.path.relpath(template_path))\n )\n\n\nclass NoFieldInDataError(DvcException):\n def __init__(self, field_name):\n super().__init__(\n f\"Field '{field_name}' does not exist in provided data.\"\n )\n\n\nclass Template:\n INDENT = 4\n SEPARATORS = (\",\", \": \")\n EXTENSION = \".json\"\n METRIC_DATA_ANCHOR = \"<DVC_METRIC_DATA>\"\n X_ANCHOR = \"<DVC_METRIC_X>\"\n Y_ANCHOR = \"<DVC_METRIC_Y>\"\n TITLE_ANCHOR = \"<DVC_METRIC_TITLE>\"\n X_TITLE_ANCHOR = \"<DVC_METRIC_X_TITLE>\"\n Y_TITLE_ANCHOR = \"<DVC_METRIC_Y_TITLE>\"\n\n def __init__(self, templates_dir):\n self.plot_templates_dir = templates_dir\n\n def dump(self):\n makedirs(self.plot_templates_dir, exist_ok=True)\n\n with open(\n os.path.join(\n self.plot_templates_dir, self.TEMPLATE_NAME + self.EXTENSION\n ),\n \"w\",\n ) as fobj:\n json.dump(\n self.DEFAULT_CONTENT,\n fobj,\n indent=self.INDENT,\n separators=self.SEPARATORS,\n )\n\n @staticmethod\n def get_data_anchor(template_content):\n regex = re.compile('\"<DVC_METRIC_DATA[^>\"]*>\"')\n return regex.findall(template_content)\n\n @staticmethod\n def parse_data_anchors(template_content):\n data_files = {\n Template.get_datafile(m)\n for m in Template.get_data_anchor(template_content)\n }\n return {df for df in data_files if df}\n\n @staticmethod\n def get_datafile(anchor_string):\n return (\n anchor_string.replace(\"<\", \"\")\n .replace(\">\", \"\")\n .replace('\"', \"\")\n .replace(\"DVC_METRIC_DATA\", \"\")\n .replace(\",\", \"\")\n )\n\n @staticmethod\n def fill(\n template_path,\n data,\n priority_datafile=None,\n x_field=None,\n y_field=None,\n title=None,\n x_title=None,\n y_title=None,\n ):\n with open(template_path) as fobj:\n result_content = fobj.read()\n\n if x_field:\n Template._check_field_exists(data, x_field)\n if y_field:\n Template._check_field_exists(data, y_field)\n\n result_content = Template._replace_data_anchors(\n result_content, data, priority_datafile\n )\n\n result_content = Template._replace_metadata_anchors(\n result_content, title, x_field, x_title, y_field, y_title\n )\n\n return result_content\n\n @staticmethod\n def _check_field_exists(data, field):\n for file, data_points in data.items():\n if not any(\n field in data_point.keys() for data_point in data_points\n ):\n raise 
NoFieldInDataError(field)\n\n @staticmethod\n def _replace_metadata_anchors(\n result_content, title, x_field, x_title, y_field, y_title\n ):\n if Template.TITLE_ANCHOR in result_content:\n if title:\n result_content = result_content.replace(\n Template.TITLE_ANCHOR, title\n )\n else:\n result_content = result_content.replace(\n Template.TITLE_ANCHOR, \"\"\n )\n if Template.X_ANCHOR in result_content and x_field:\n result_content = result_content.replace(Template.X_ANCHOR, x_field)\n if Template.Y_ANCHOR in result_content and y_field:\n result_content = result_content.replace(Template.Y_ANCHOR, y_field)\n if Template.X_TITLE_ANCHOR in result_content:\n if not x_title and x_field:\n x_title = x_field\n result_content = result_content.replace(\n Template.X_TITLE_ANCHOR, x_title\n )\n if Template.Y_TITLE_ANCHOR in result_content:\n if not y_title and y_field:\n y_title = y_field\n result_content = result_content.replace(\n Template.Y_TITLE_ANCHOR, y_title\n )\n return result_content\n\n @staticmethod\n def _replace_data_anchors(result_content, data, priority_datafile):\n for anchor in Template.get_data_anchor(result_content):\n file = Template.get_datafile(anchor)\n\n if not file or priority_datafile:\n key = priority_datafile\n else:\n key = file\n\n result_content = result_content.replace(\n anchor,\n json.dumps(\n data[key],\n indent=Template.INDENT,\n separators=Template.SEPARATORS,\n sort_keys=True,\n ),\n )\n return result_content\n\n\nclass DefaultLinearTemplate(Template):\n TEMPLATE_NAME = \"default\"\n\n DEFAULT_CONTENT = {\n \"$schema\": \"https://vega.github.io/schema/vega-lite/v4.json\",\n \"data\": {\"values\": Template.METRIC_DATA_ANCHOR},\n \"title\": Template.TITLE_ANCHOR,\n \"mark\": {\"type\": \"line\"},\n \"encoding\": {\n \"x\": {\n \"field\": Template.X_ANCHOR,\n \"type\": \"quantitative\",\n \"title\": Template.X_TITLE_ANCHOR,\n },\n \"y\": {\n \"field\": Template.Y_ANCHOR,\n \"type\": \"quantitative\",\n \"title\": Template.Y_TITLE_ANCHOR,\n \"scale\": {\"zero\": False},\n },\n \"color\": {\"field\": \"rev\", \"type\": \"nominal\"},\n },\n }\n\n\nclass DefaultConfusionTemplate(Template):\n TEMPLATE_NAME = \"confusion\"\n DEFAULT_CONTENT = {\n \"$schema\": \"https://vega.github.io/schema/vega-lite/v4.json\",\n \"data\": {\"values\": Template.METRIC_DATA_ANCHOR},\n \"title\": Template.TITLE_ANCHOR,\n \"mark\": \"rect\",\n \"encoding\": {\n \"x\": {\n \"field\": Template.X_ANCHOR,\n \"type\": \"nominal\",\n \"sort\": \"ascending\",\n \"title\": Template.X_TITLE_ANCHOR,\n },\n \"y\": {\n \"field\": Template.Y_ANCHOR,\n \"type\": \"nominal\",\n \"sort\": \"ascending\",\n \"title\": Template.Y_TITLE_ANCHOR,\n },\n \"color\": {\"aggregate\": \"count\", \"type\": \"quantitative\"},\n \"facet\": {\"field\": \"rev\", \"type\": \"nominal\"},\n },\n }\n\n\nclass DefaultScatterTemplate(Template):\n TEMPLATE_NAME = \"scatter\"\n DEFAULT_CONTENT = {\n \"$schema\": \"https://vega.github.io/schema/vega-lite/v4.json\",\n \"data\": {\"values\": Template.METRIC_DATA_ANCHOR},\n \"title\": Template.TITLE_ANCHOR,\n \"mark\": \"point\",\n \"encoding\": {\n \"x\": {\n \"field\": Template.X_ANCHOR,\n \"type\": \"quantitative\",\n \"title\": Template.X_TITLE_ANCHOR,\n },\n \"y\": {\n \"field\": Template.Y_ANCHOR,\n \"type\": \"quantitative\",\n \"title\": Template.Y_TITLE_ANCHOR,\n \"scale\": {\"zero\": False},\n },\n \"color\": {\"field\": \"rev\", \"type\": \"nominal\"},\n },\n }\n\n\nclass PlotTemplates:\n TEMPLATES_DIR = \"plots\"\n TEMPLATES = [\n DefaultLinearTemplate,\n 
DefaultConfusionTemplate,\n DefaultScatterTemplate,\n ]\n\n @cached_property\n def templates_dir(self):\n return os.path.join(self.dvc_dir, self.TEMPLATES_DIR)\n\n @cached_property\n def default_template(self):\n default_plot_path = os.path.join(self.templates_dir, \"default.json\")\n if not os.path.exists(default_plot_path):\n raise TemplateNotFoundError(os.path.relpath(default_plot_path))\n return default_plot_path\n\n def get_template(self, path):\n t_path = os.path.join(self.templates_dir, path)\n if os.path.exists(t_path):\n return t_path\n\n all_templates = [\n os.path.join(root, file)\n for root, _, files in os.walk(self.templates_dir)\n for file in files\n ]\n matches = [\n template\n for template in all_templates\n if os.path.splitext(template)[0] == t_path\n ]\n if matches:\n assert len(matches) == 1\n return matches[0]\n\n raise TemplateNotFoundError(path)\n\n def __init__(self, dvc_dir):\n self.dvc_dir = dvc_dir\n\n if not os.path.exists(self.templates_dir):\n makedirs(self.templates_dir, exist_ok=True)\n for t in self.TEMPLATES:\n t(self.templates_dir).dump()\n", "path": "dvc/repo/plots/template.py"}]} | 3,442 | 94 |
gh_patches_debug_2344 | rasdani/github-patches | git_diff | ethereum__web3.py-3196 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove lru-dict dependency
lru-dict requires a wheel that is not pre-compiled for Python 3.11.
It is only used in 1 place where it should be able to be replaced with the built-in functools lru cache: https://github.com/ethereum/web3.py/blob/master/web3/middleware/cache.py#L196
Removing this dependency would avoid future compatibility problems as well.
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 from setuptools import (
3 find_packages,
4 setup,
5 )
6
7 extras_require = {
8 "tester": [
9 "eth-tester[py-evm]==v0.9.1-b.1",
10 "py-geth>=3.11.0",
11 ],
12 "linter": [
13 "black>=22.1.0",
14 "flake8==3.8.3",
15 "isort>=5.11.0",
16 "mypy==1.4.1",
17 "types-setuptools>=57.4.4",
18 "types-requests>=2.26.1",
19 "types-protobuf==3.19.13",
20 ],
21 "docs": [
22 "sphinx>=5.3.0",
23 "sphinx_rtd_theme>=1.0.0",
24 "towncrier>=21,<22",
25 ],
26 "dev": [
27 "bumpversion",
28 "flaky>=3.7.0",
29 "hypothesis>=3.31.2",
30 "importlib-metadata<5.0;python_version<'3.8'",
31 "pytest>=7.0.0",
32 "pytest-asyncio>=0.18.1,<0.23",
33 "pytest-mock>=1.10",
34 "pytest-watch>=4.2",
35 "pytest-xdist>=1.29",
36 "setuptools>=38.6.0",
37 "tox>=3.18.0",
38 "tqdm>4.32",
39 "twine>=1.13",
40 "when-changed>=0.3.0",
41 "build>=0.9.0",
42 ],
43 "ipfs": [
44 "ipfshttpclient==0.8.0a2",
45 ],
46 }
47
48 extras_require["dev"] = (
49 extras_require["tester"]
50 + extras_require["linter"]
51 + extras_require["docs"]
52 + extras_require["ipfs"]
53 + extras_require["dev"]
54 )
55
56 with open("./README.md") as readme:
57 long_description = readme.read()
58
59 setup(
60 name="web3",
61 # *IMPORTANT*: Don't manually change the version here. Use the 'bumpversion' utility.
62 version="6.14.0",
63 description="""web3.py""",
64 long_description_content_type="text/markdown",
65 long_description=long_description,
66 author="The Ethereum Foundation",
67 author_email="[email protected]",
68 url="https://github.com/ethereum/web3.py",
69 include_package_data=True,
70 install_requires=[
71 "aiohttp>=3.7.4.post0",
72 "eth-abi>=4.0.0",
73 "eth-account>=0.8.0",
74 "eth-hash[pycryptodome]>=0.5.1",
75 "eth-typing>=3.0.0",
76 "eth-utils>=2.1.0",
77 "hexbytes>=0.1.0,<0.4.0",
78 "jsonschema>=4.0.0",
79 "lru-dict>=1.1.6,<1.3.0",
80 "protobuf>=4.21.6",
81 "pydantic>=2.4.0",
82 "pywin32>=223;platform_system=='Windows'",
83 "requests>=2.16.0",
84 "typing-extensions>=4.0.1",
85 "websockets>=10.0.0",
86 "pyunormalize>=15.0.0",
87 ],
88 python_requires=">=3.7.2",
89 extras_require=extras_require,
90 py_modules=["web3", "ens", "ethpm"],
91 entry_points={"pytest11": ["pytest_ethereum = web3.tools.pytest_ethereum.plugins"]},
92 license="MIT",
93 zip_safe=False,
94 keywords="ethereum",
95 packages=find_packages(exclude=["tests", "tests.*"]),
96 package_data={"web3": ["py.typed"]},
97 classifiers=[
98 "Development Status :: 5 - Production/Stable",
99 "Intended Audience :: Developers",
100 "License :: OSI Approved :: MIT License",
101 "Natural Language :: English",
102 "Programming Language :: Python :: 3",
103 "Programming Language :: Python :: 3.7",
104 "Programming Language :: Python :: 3.8",
105 "Programming Language :: Python :: 3.9",
106 "Programming Language :: Python :: 3.10",
107 "Programming Language :: Python :: 3.11",
108 ],
109 )
110
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -76,7 +76,6 @@
"eth-utils>=2.1.0",
"hexbytes>=0.1.0,<0.4.0",
"jsonschema>=4.0.0",
- "lru-dict>=1.1.6,<1.3.0",
"protobuf>=4.21.6",
"pydantic>=2.4.0",
"pywin32>=223;platform_system=='Windows'",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -76,7 +76,6 @@\n \"eth-utils>=2.1.0\",\n \"hexbytes>=0.1.0,<0.4.0\",\n \"jsonschema>=4.0.0\",\n- \"lru-dict>=1.1.6,<1.3.0\",\n \"protobuf>=4.21.6\",\n \"pydantic>=2.4.0\",\n \"pywin32>=223;platform_system=='Windows'\",\n", "issue": "Remove lru-dict dependency\nlru-dict requires a wheel that is not pre-compiled for Python 3.11.\r\n\r\nIt is only used in 1 place where it should be able to be replaced with the built-in functools lru cache: https://github.com/ethereum/web3.py/blob/master/web3/middleware/cache.py#L196\r\n\r\nRemoving this dependency would avoid future compatibility problems as well.\n", "before_files": [{"content": "#!/usr/bin/env python\nfrom setuptools import (\n find_packages,\n setup,\n)\n\nextras_require = {\n \"tester\": [\n \"eth-tester[py-evm]==v0.9.1-b.1\",\n \"py-geth>=3.11.0\",\n ],\n \"linter\": [\n \"black>=22.1.0\",\n \"flake8==3.8.3\",\n \"isort>=5.11.0\",\n \"mypy==1.4.1\",\n \"types-setuptools>=57.4.4\",\n \"types-requests>=2.26.1\",\n \"types-protobuf==3.19.13\",\n ],\n \"docs\": [\n \"sphinx>=5.3.0\",\n \"sphinx_rtd_theme>=1.0.0\",\n \"towncrier>=21,<22\",\n ],\n \"dev\": [\n \"bumpversion\",\n \"flaky>=3.7.0\",\n \"hypothesis>=3.31.2\",\n \"importlib-metadata<5.0;python_version<'3.8'\",\n \"pytest>=7.0.0\",\n \"pytest-asyncio>=0.18.1,<0.23\",\n \"pytest-mock>=1.10\",\n \"pytest-watch>=4.2\",\n \"pytest-xdist>=1.29\",\n \"setuptools>=38.6.0\",\n \"tox>=3.18.0\",\n \"tqdm>4.32\",\n \"twine>=1.13\",\n \"when-changed>=0.3.0\",\n \"build>=0.9.0\",\n ],\n \"ipfs\": [\n \"ipfshttpclient==0.8.0a2\",\n ],\n}\n\nextras_require[\"dev\"] = (\n extras_require[\"tester\"]\n + extras_require[\"linter\"]\n + extras_require[\"docs\"]\n + extras_require[\"ipfs\"]\n + extras_require[\"dev\"]\n)\n\nwith open(\"./README.md\") as readme:\n long_description = readme.read()\n\nsetup(\n name=\"web3\",\n # *IMPORTANT*: Don't manually change the version here. 
Use the 'bumpversion' utility.\n version=\"6.14.0\",\n description=\"\"\"web3.py\"\"\",\n long_description_content_type=\"text/markdown\",\n long_description=long_description,\n author=\"The Ethereum Foundation\",\n author_email=\"[email protected]\",\n url=\"https://github.com/ethereum/web3.py\",\n include_package_data=True,\n install_requires=[\n \"aiohttp>=3.7.4.post0\",\n \"eth-abi>=4.0.0\",\n \"eth-account>=0.8.0\",\n \"eth-hash[pycryptodome]>=0.5.1\",\n \"eth-typing>=3.0.0\",\n \"eth-utils>=2.1.0\",\n \"hexbytes>=0.1.0,<0.4.0\",\n \"jsonschema>=4.0.0\",\n \"lru-dict>=1.1.6,<1.3.0\",\n \"protobuf>=4.21.6\",\n \"pydantic>=2.4.0\",\n \"pywin32>=223;platform_system=='Windows'\",\n \"requests>=2.16.0\",\n \"typing-extensions>=4.0.1\",\n \"websockets>=10.0.0\",\n \"pyunormalize>=15.0.0\",\n ],\n python_requires=\">=3.7.2\",\n extras_require=extras_require,\n py_modules=[\"web3\", \"ens\", \"ethpm\"],\n entry_points={\"pytest11\": [\"pytest_ethereum = web3.tools.pytest_ethereum.plugins\"]},\n license=\"MIT\",\n zip_safe=False,\n keywords=\"ethereum\",\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n package_data={\"web3\": [\"py.typed\"]},\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Natural Language :: English\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n ],\n)\n", "path": "setup.py"}]} | 1,837 | 130 |
gh_patches_debug_6133 | rasdani/github-patches | git_diff | DataBiosphere__toil-2888 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Python 3.6 no longer available via ppa:jonathonf/python-3.6
Recently announced: https://launchpad.net/~jonathonf
I think we can use `ppa:deadsnakes/ppa` instead and things should work fine again.
┆Issue is synchronized with this [Jira Task](https://ucsc-cgl.atlassian.net/browse/TOIL-470)
┆Issue Number: TOIL-470
</issue>
<code>
[start of docker/Dockerfile.py]
1 # Copyright (C) 2015-2016 Regents of the University of California
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from __future__ import print_function
16 import os
17 import textwrap
18
19 applianceSelf = os.environ['TOIL_APPLIANCE_SELF']
20 sdistName = os.environ['_TOIL_SDIST_NAME']
21
22
23 dependencies = ' '.join(['libffi-dev', # For client side encryption for extras with PyNACL
24 'python3.6',
25 'python3.6-dev',
26 'python-dev', # For installing Python packages with native code
27 'python-pip', # Bootstrap pip, but needs upgrading, see below
28 'python3-pip',
29 'libcurl4-openssl-dev',
30 'libssl-dev',
31 'wget',
32 'curl',
33 'openssh-server',
34 'mesos=1.0.1-2.0.94.ubuntu1604',
35 "nodejs", # CWL support for javascript expressions
36 'rsync',
37 'screen',
38 'build-essential', # We need a build environment to build Singularity 3.
39 'uuid-dev',
40 'libgpgme11-dev',
41 'libseccomp-dev',
42 'pkg-config',
43 'squashfs-tools',
44 'cryptsetup',
45 'git'])
46
47
48 def heredoc(s):
49 s = textwrap.dedent(s).format(**globals())
50 return s[1:] if s.startswith('\n') else s
51
52
53 motd = heredoc('''
54
55 This is the Toil appliance. You can run your Toil script directly on the appliance.
56 Run toil <workflow>.py --help to see all options for running your workflow.
57 For more information see http://toil.readthedocs.io/en/latest/
58
59 Copyright (C) 2015-2018 Regents of the University of California
60
61 Version: {applianceSelf}
62
63 ''')
64
65 # Prepare motd to be echoed in the Dockerfile using a RUN statement that uses bash's print
66 motd = ''.join(l + '\\n\\\n' for l in motd.splitlines())
67
68 print(heredoc('''
69 FROM ubuntu:16.04
70
71 RUN apt-get -y update --fix-missing && apt-get -y upgrade && apt-get -y install apt-transport-https ca-certificates software-properties-common && apt-get clean && rm -rf /var/lib/apt/lists/*
72
73 RUN echo "deb http://repos.mesosphere.io/ubuntu/ xenial main" \
74 > /etc/apt/sources.list.d/mesosphere.list \
75 && apt-key adv --keyserver keyserver.ubuntu.com --recv E56151BF \
76 && echo "deb http://deb.nodesource.com/node_6.x xenial main" \
77 > /etc/apt/sources.list.d/nodesource.list \
78 && apt-key adv --keyserver keyserver.ubuntu.com --recv 68576280
79
80 RUN add-apt-repository -y ppa:jonathonf/python-3.6
81
82 RUN apt-get -y update --fix-missing && \
83 DEBIAN_FRONTEND=noninteractive apt-get -y upgrade && \
84 DEBIAN_FRONTEND=noninteractive apt-get -y install {dependencies} && \
85 apt-get clean && \
86 rm -rf /var/lib/apt/lists/*
87
88 RUN wget https://dl.google.com/go/go1.13.3.linux-amd64.tar.gz && \
89 tar xvf go1.13.3.linux-amd64.tar.gz && \
90 mv go/bin/* /usr/bin/ && \
91 mv go /usr/local/
92
93 RUN mkdir -p $(go env GOPATH)/src/github.com/sylabs && \
94 cd $(go env GOPATH)/src/github.com/sylabs && \
95 git clone https://github.com/sylabs/singularity.git && \
96 cd singularity && \
97 git checkout v3.4.2 && \
98 ./mconfig && \
99 cd ./builddir && \
100 make -j4 && \
101 make install
102
103 RUN mkdir /root/.ssh && \
104 chmod 700 /root/.ssh
105
106 ADD waitForKey.sh /usr/bin/waitForKey.sh
107
108 ADD customDockerInit.sh /usr/bin/customDockerInit.sh
109
110 RUN chmod 777 /usr/bin/waitForKey.sh && chmod 777 /usr/bin/customDockerInit.sh
111
112 # The stock pip is too old and can't install from sdist with extras
113 RUN pip install --upgrade pip==9.0.1
114
115 # Default setuptools is too old
116 RUN pip install --upgrade setuptools==36.5.0
117
118 # Include virtualenv, as it is still the recommended way to deploy pipelines
119 RUN pip install --upgrade virtualenv==15.0.3
120
121 # Install s3am (--never-download prevents silent upgrades to pip, wheel and setuptools)
122 RUN virtualenv --never-download /home/s3am \
123 && /home/s3am/bin/pip install s3am==2.0 \
124 && ln -s /home/s3am/bin/s3am /usr/local/bin/
125
126 # Install statically linked version of docker client
127 RUN curl https://download.docker.com/linux/static/stable/x86_64/docker-18.06.1-ce.tgz \
128 | tar -xvzf - --transform='s,[^/]*/,,g' -C /usr/local/bin/ \
129 && chmod u+x /usr/local/bin/docker
130
131 # Fix for Mesos interface dependency missing on ubuntu
132 RUN pip install protobuf==3.0.0
133
134 # Fix for https://issues.apache.org/jira/browse/MESOS-3793
135 ENV MESOS_LAUNCHER=posix
136
137 # Fix for `screen` (https://github.com/BD2KGenomics/toil/pull/1386#issuecomment-267424561)
138 ENV TERM linux
139
140 # Run bash instead of sh inside of screen
141 ENV SHELL /bin/bash
142 RUN echo "defshell -bash" > ~/.screenrc
143
144 # An appliance may need to start more appliances, e.g. when the leader appliance launches the
145 # worker appliance on a worker node. To support this, we embed a self-reference into the image:
146 ENV TOIL_APPLIANCE_SELF {applianceSelf}
147
148 RUN mkdir /var/lib/toil
149
150 ENV TOIL_WORKDIR /var/lib/toil
151
152 # This component changes most frequently and keeping it last maximizes Docker cache hits.
153 COPY {sdistName} .
154 RUN pip install {sdistName}[all]
155 RUN rm {sdistName}
156
157 # We intentionally inherit the default ENTRYPOINT and CMD from the base image, to the effect
158 # that the running appliance just gives you a shell. To start the Mesos master or slave
159 # daemons, the user # should override the entrypoint via --entrypoint.
160
161 RUN echo '[ ! -z "$TERM" -a -r /etc/motd ] && cat /etc/motd' >> /etc/bash.bashrc \
162 && printf '{motd}' > /etc/motd
163 '''))
164
[end of docker/Dockerfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docker/Dockerfile.py b/docker/Dockerfile.py
--- a/docker/Dockerfile.py
+++ b/docker/Dockerfile.py
@@ -77,7 +77,7 @@
> /etc/apt/sources.list.d/nodesource.list \
&& apt-key adv --keyserver keyserver.ubuntu.com --recv 68576280
- RUN add-apt-repository -y ppa:jonathonf/python-3.6
+ RUN add-apt-repository -y ppa:deadsnakes/ppa
RUN apt-get -y update --fix-missing && \
DEBIAN_FRONTEND=noninteractive apt-get -y upgrade && \
| {"golden_diff": "diff --git a/docker/Dockerfile.py b/docker/Dockerfile.py\n--- a/docker/Dockerfile.py\n+++ b/docker/Dockerfile.py\n@@ -77,7 +77,7 @@\n > /etc/apt/sources.list.d/nodesource.list \\\n && apt-key adv --keyserver keyserver.ubuntu.com --recv 68576280\n \n- RUN add-apt-repository -y ppa:jonathonf/python-3.6\n+ RUN add-apt-repository -y ppa:deadsnakes/ppa\n \n RUN apt-get -y update --fix-missing && \\\n DEBIAN_FRONTEND=noninteractive apt-get -y upgrade && \\\n", "issue": "Python 3.6 no longer available via ppa:jonathonf/python-3.6\nRecently announced: https://launchpad.net/~jonathonf\n\nI think we can use `ppa:deadsnakes/ppa` instead and things should work fine again.\n\n\u2506Issue is synchronized with this [Jira Task](https://ucsc-cgl.atlassian.net/browse/TOIL-470)\n\u2506Issue Number: TOIL-470\n\n", "before_files": [{"content": "# Copyright (C) 2015-2016 Regents of the University of California\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\nimport os\nimport textwrap\n\napplianceSelf = os.environ['TOIL_APPLIANCE_SELF']\nsdistName = os.environ['_TOIL_SDIST_NAME']\n\n\ndependencies = ' '.join(['libffi-dev', # For client side encryption for extras with PyNACL\n 'python3.6',\n 'python3.6-dev',\n 'python-dev', # For installing Python packages with native code\n 'python-pip', # Bootstrap pip, but needs upgrading, see below\n 'python3-pip',\n 'libcurl4-openssl-dev',\n 'libssl-dev',\n 'wget',\n 'curl',\n 'openssh-server',\n 'mesos=1.0.1-2.0.94.ubuntu1604',\n \"nodejs\", # CWL support for javascript expressions\n 'rsync',\n 'screen',\n 'build-essential', # We need a build environment to build Singularity 3.\n 'uuid-dev',\n 'libgpgme11-dev',\n 'libseccomp-dev',\n 'pkg-config',\n 'squashfs-tools',\n 'cryptsetup',\n 'git'])\n\n\ndef heredoc(s):\n s = textwrap.dedent(s).format(**globals())\n return s[1:] if s.startswith('\\n') else s\n\n\nmotd = heredoc('''\n\n This is the Toil appliance. You can run your Toil script directly on the appliance. 
\n Run toil <workflow>.py --help to see all options for running your workflow.\n For more information see http://toil.readthedocs.io/en/latest/\n\n Copyright (C) 2015-2018 Regents of the University of California\n\n Version: {applianceSelf}\n\n''')\n\n# Prepare motd to be echoed in the Dockerfile using a RUN statement that uses bash's print\nmotd = ''.join(l + '\\\\n\\\\\\n' for l in motd.splitlines())\n\nprint(heredoc('''\n FROM ubuntu:16.04\n\n RUN apt-get -y update --fix-missing && apt-get -y upgrade && apt-get -y install apt-transport-https ca-certificates software-properties-common && apt-get clean && rm -rf /var/lib/apt/lists/*\n\n RUN echo \"deb http://repos.mesosphere.io/ubuntu/ xenial main\" \\\n > /etc/apt/sources.list.d/mesosphere.list \\\n && apt-key adv --keyserver keyserver.ubuntu.com --recv E56151BF \\\n && echo \"deb http://deb.nodesource.com/node_6.x xenial main\" \\\n > /etc/apt/sources.list.d/nodesource.list \\\n && apt-key adv --keyserver keyserver.ubuntu.com --recv 68576280\n\n RUN add-apt-repository -y ppa:jonathonf/python-3.6\n \n RUN apt-get -y update --fix-missing && \\\n DEBIAN_FRONTEND=noninteractive apt-get -y upgrade && \\\n DEBIAN_FRONTEND=noninteractive apt-get -y install {dependencies} && \\\n apt-get clean && \\\n rm -rf /var/lib/apt/lists/*\n\n RUN wget https://dl.google.com/go/go1.13.3.linux-amd64.tar.gz && \\\n tar xvf go1.13.3.linux-amd64.tar.gz && \\\n mv go/bin/* /usr/bin/ && \\\n mv go /usr/local/\n \n RUN mkdir -p $(go env GOPATH)/src/github.com/sylabs && \\\n cd $(go env GOPATH)/src/github.com/sylabs && \\\n git clone https://github.com/sylabs/singularity.git && \\\n cd singularity && \\\n git checkout v3.4.2 && \\\n ./mconfig && \\\n cd ./builddir && \\\n make -j4 && \\\n make install\n \n RUN mkdir /root/.ssh && \\\n chmod 700 /root/.ssh\n\n ADD waitForKey.sh /usr/bin/waitForKey.sh\n\n ADD customDockerInit.sh /usr/bin/customDockerInit.sh\n\n RUN chmod 777 /usr/bin/waitForKey.sh && chmod 777 /usr/bin/customDockerInit.sh\n \n # The stock pip is too old and can't install from sdist with extras\n RUN pip install --upgrade pip==9.0.1\n\n # Default setuptools is too old\n RUN pip install --upgrade setuptools==36.5.0\n\n # Include virtualenv, as it is still the recommended way to deploy pipelines\n RUN pip install --upgrade virtualenv==15.0.3\n\n # Install s3am (--never-download prevents silent upgrades to pip, wheel and setuptools)\n RUN virtualenv --never-download /home/s3am \\\n && /home/s3am/bin/pip install s3am==2.0 \\\n && ln -s /home/s3am/bin/s3am /usr/local/bin/\n\n # Install statically linked version of docker client\n RUN curl https://download.docker.com/linux/static/stable/x86_64/docker-18.06.1-ce.tgz \\\n | tar -xvzf - --transform='s,[^/]*/,,g' -C /usr/local/bin/ \\\n && chmod u+x /usr/local/bin/docker\n\n # Fix for Mesos interface dependency missing on ubuntu\n RUN pip install protobuf==3.0.0\n\n # Fix for https://issues.apache.org/jira/browse/MESOS-3793\n ENV MESOS_LAUNCHER=posix\n\n # Fix for `screen` (https://github.com/BD2KGenomics/toil/pull/1386#issuecomment-267424561)\n ENV TERM linux\n \n # Run bash instead of sh inside of screen\n ENV SHELL /bin/bash\n RUN echo \"defshell -bash\" > ~/.screenrc\n\n # An appliance may need to start more appliances, e.g. when the leader appliance launches the\n # worker appliance on a worker node. 
To support this, we embed a self-reference into the image:\n ENV TOIL_APPLIANCE_SELF {applianceSelf}\n\n RUN mkdir /var/lib/toil\n\n ENV TOIL_WORKDIR /var/lib/toil\n\n # This component changes most frequently and keeping it last maximizes Docker cache hits.\n COPY {sdistName} .\n RUN pip install {sdistName}[all]\n RUN rm {sdistName}\n\n # We intentionally inherit the default ENTRYPOINT and CMD from the base image, to the effect\n # that the running appliance just gives you a shell. To start the Mesos master or slave\n # daemons, the user # should override the entrypoint via --entrypoint.\n\n RUN echo '[ ! -z \"$TERM\" -a -r /etc/motd ] && cat /etc/motd' >> /etc/bash.bashrc \\\n && printf '{motd}' > /etc/motd\n'''))\n", "path": "docker/Dockerfile.py"}]} | 2,733 | 153 |
gh_patches_debug_41325 | rasdani/github-patches | git_diff | Flexget__Flexget-2263 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
No results found on torrentday
### Expected behaviour:
When using the torrentday as a source with discover, results should display
### Actual behaviour:
Always returns "No results from torrentday"
### Steps to reproduce:
Setup to search as per config
#### Config:
https://pastebin.com/6xWSD9i2
#### Log:
https://pastebin.com/uqa0bDea
### Additional information:
torrentday appears to have updated their browse/search page https://www.torrentday.com/browse.php
</issue>
<code>
[start of flexget/plugins/sites/torrentday.py]
1 from __future__ import unicode_literals, division, absolute_import
2 from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
3 from future.moves.urllib.parse import quote
4
5 import re
6 import logging
7
8 from requests.exceptions import RequestException
9
10 from flexget import plugin
11 from flexget.config_schema import one_or_more
12 from flexget.entry import Entry
13 from flexget.event import event
14 from flexget.plugin import PluginError
15 from flexget.plugins.internal.urlrewriting import UrlRewritingError
16 from flexget.utils import requests
17 from flexget.utils.soup import get_soup
18 from flexget.utils.search import torrent_availability, normalize_unicode
19 from flexget.utils.tools import parse_filesize
20
21 log = logging.getLogger('torrentday')
22
23 CATEGORIES = {
24 'all': 0,
25 # Movies
26 'mov480p': 25,
27 'movHD': 11,
28 'movBD': 5,
29 'movDVD': 3,
30 'movMP4': 21,
31 'movNonEnglish': 22,
32 'movPACKS': 13,
33 'movSDx264': 44,
34 'movX265': 48,
35 'movXVID': 1,
36
37 # TV
38 'tv480p': 24,
39 'tvBRD': 32,
40 'tvDVD': 31,
41 'tvDVDrip': 33,
42 'tvMOBILE': 46,
43 'tvPACKS': 14,
44 'tvSDx264': 26,
45 'tvHDx264': 7,
46 'tvX265': 34,
47 'tvXVID': 2
48 }
49
50
51 class UrlRewriteTorrentday(object):
52 """
53 Torrentday urlrewriter and search plugin.
54
55 torrentday:
56 uid: xxxxxxxxxxxxx (required) NOT YOUR LOGIN. find this in your browser's cookies
57 passkey: xxxxxxxxx (required) NOT YOUR PASSWORD. see previous
58 cfduid: xxxxxxxxxx (required) AGAIN IN THE COOKIES
59 rss_key: xxxxxxxxx (required) get this from your profile page
60 category: xxxxxxxx
61
62 Category can be one of
63 ID from browsing site OR 'name'
64 movies:
65 mov480p, movHD, movBD, movDVD,
66 movMP4, movNonEnglish, movPACKS,
67 movSDx264, movX265, movXVID
68 tv:
69 tv480p, tvBRD, tvDVD, tvDVDrip,
70 tvMOBILE, tvPACKS, tvSDx264,
71 tvHDx264, tvX265, tvXVID
72 """
73
74 schema = {
75 'type': 'object',
76 'properties': {
77 'rss_key': {'type': 'string'},
78 'uid': {'type': 'string'},
79 'passkey': {'type': 'string'},
80 'cfduid': {'type': 'string'},
81 'category': {
82 'oneOf': [
83 {'type': 'integer'},
84 {'type': 'string', 'enum': list(CATEGORIES)},
85 ]
86 },
87 },
88 'required': ['rss_key', 'uid', 'passkey', 'cfduid'],
89 'additionalProperties': False
90 }
91
92 # urlrewriter API
93 def url_rewritable(self, task, entry):
94 url = entry['url']
95 if url.find('.torrent'):
96 return False
97 if url.startswith('https://www.torrentday.com'):
98 return True
99 return False
100
101 # urlrewriter API
102 def url_rewrite(self, task, entry):
103 if 'url' not in entry:
104 log.error('Didn\'t actually get a URL...')
105 else:
106 log.debug('Got the URL: %s', entry['url'])
107 if entry['url'].startswith('https://www.torrentday.com/browse'):
108 # use search
109 results = self.search(task, entry)
110 if not results:
111 raise UrlRewritingError('No search results found')
112 entry['url'] = results[0]['url']
113
114 @plugin.internet(log)
115 def search(self, task, entry, config=None):
116 """
117 Search for name from torrentday.
118 """
119
120 categories = config.get('category', 'all')
121 # Make sure categories is a list
122 if not isinstance(categories, list):
123 categories = [categories]
124 # If there are any text categories, turn them into their id number
125 categories = [c if isinstance(c, int) else CATEGORIES[c] for c in categories]
126 params = { 'cata': 'yes', 'c%s' % ','.join(str(c) for c in categories): 1, 'clear-new': 1}
127 entries = set()
128 for search_string in entry.get('search_strings', [entry['title']]):
129
130 url = 'https://www.torrentday.com/browse.php'
131 params['search'] = normalize_unicode(search_string).replace(':', '')
132 cookies = { 'uid': config['uid'], 'pass': config['passkey'], '__cfduid': config['cfduid'] }
133
134 try:
135 page = requests.get(url, params=params, cookies=cookies).content
136 except RequestException as e:
137 raise PluginError('Could not connect to torrentday: %s' % e)
138
139 soup = get_soup(page)
140
141 for tr in soup.find_all('tr', { 'class': 'browse' }):
142 entry = Entry()
143 # find the torrent names
144 title = tr.find('a', { 'class': 'torrentName' })
145 entry['title'] = title.contents[0]
146 log.debug('title: %s', title.contents[0])
147
148 # find download link
149 torrent_url = tr.find('td', { 'class': 'dlLinksInfo' })
150 torrent_url = torrent_url.find('a').get('href')
151
152 # construct download URL
153 torrent_url = ( 'https://www.torrentday.com/' + torrent_url + '?torrent_pass=' + config['rss_key'] )
154 log.debug('RSS-ified download link: %s', torrent_url)
155 entry['url'] = torrent_url
156
157 # us tr object for seeders/leechers
158 seeders, leechers = tr.find_all('td', { 'class': ['seedersInfo', 'leechersInfo']})
159 entry['torrent_seeds'] = int(seeders.contents[0].replace(',', ''))
160 entry['torrent_leeches'] = int(leechers.contents[0].replace(',', ''))
161 entry['search_sort'] = torrent_availability(entry['torrent_seeds'], entry['torrent_leeches'])
162
163 # use tr object for size
164 size = tr.find('td', text=re.compile('([\.\d]+) ([TGMKk]?)B')).contents[0]
165 size = re.search('([\.\d]+) ([TGMKk]?)B', str(size))
166
167 entry['content_size'] = parse_filesize(size.group(0))
168
169 entries.add(entry)
170
171 return sorted(entries, reverse=True, key=lambda x: x.get('search_sort'))
172
173
174 @event('plugin.register')
175 def register_plugin():
176 plugin.register(UrlRewriteTorrentday, 'torrentday', interfaces=['urlrewriter', 'search'], api_ver=2)
177
[end of flexget/plugins/sites/torrentday.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/flexget/plugins/sites/torrentday.py b/flexget/plugins/sites/torrentday.py
--- a/flexget/plugins/sites/torrentday.py
+++ b/flexget/plugins/sites/torrentday.py
@@ -123,30 +123,40 @@
categories = [categories]
# If there are any text categories, turn them into their id number
categories = [c if isinstance(c, int) else CATEGORIES[c] for c in categories]
- params = { 'cata': 'yes', 'c%s' % ','.join(str(c) for c in categories): 1, 'clear-new': 1}
+ params = { 'cata': 'yes', 'c{}'.format(','.join(str(c) for c in categories)): 1, 'clear-new': 1}
entries = set()
for search_string in entry.get('search_strings', [entry['title']]):
- url = 'https://www.torrentday.com/browse.php'
- params['search'] = normalize_unicode(search_string).replace(':', '')
+ url = 'https://www.torrentday.com/t'
+ params['q'] = normalize_unicode(search_string).replace(':', '')
cookies = { 'uid': config['uid'], 'pass': config['passkey'], '__cfduid': config['cfduid'] }
try:
page = requests.get(url, params=params, cookies=cookies).content
except RequestException as e:
- raise PluginError('Could not connect to torrentday: %s' % e)
+ raise PluginError('Could not connect to torrentday: {}'.format(e))
soup = get_soup(page)
-
- for tr in soup.find_all('tr', { 'class': 'browse' }):
+ # the first row is the header so skip it
+ for tr in soup.find_all('tr')[1:]:
entry = Entry()
# find the torrent names
- title = tr.find('a', { 'class': 'torrentName' })
+ td = tr.find('td', { 'class': 'torrentNameInfo' })
+ if not td:
+ log.warning('Could not find entry torrentNameInfo for %s.', search_string)
+ continue
+ title = td.find('a')
+ if not title:
+ log.warning('Could not determine title for %s.', search_string)
+ continue
entry['title'] = title.contents[0]
log.debug('title: %s', title.contents[0])
# find download link
- torrent_url = tr.find('td', { 'class': 'dlLinksInfo' })
+ torrent_url = tr.find('td', { 'class': 'ac' })
+ if not torrent_url:
+ log.warning('Could not determine download link for %s.', search_string)
+ continue
torrent_url = torrent_url.find('a').get('href')
# construct download URL
@@ -155,7 +165,8 @@
entry['url'] = torrent_url
# us tr object for seeders/leechers
- seeders, leechers = tr.find_all('td', { 'class': ['seedersInfo', 'leechersInfo']})
+ seeders = tr.find_all('td', { 'class': 'ac seedersInfo'})
+ leechers = tr.find_all('td', { 'class': 'ac leechersInfo'})
entry['torrent_seeds'] = int(seeders.contents[0].replace(',', ''))
entry['torrent_leeches'] = int(leechers.contents[0].replace(',', ''))
entry['search_sort'] = torrent_availability(entry['torrent_seeds'], entry['torrent_leeches'])
| {"golden_diff": "diff --git a/flexget/plugins/sites/torrentday.py b/flexget/plugins/sites/torrentday.py\n--- a/flexget/plugins/sites/torrentday.py\n+++ b/flexget/plugins/sites/torrentday.py\n@@ -123,30 +123,40 @@\n categories = [categories]\n # If there are any text categories, turn them into their id number\n categories = [c if isinstance(c, int) else CATEGORIES[c] for c in categories]\n- params = { 'cata': 'yes', 'c%s' % ','.join(str(c) for c in categories): 1, 'clear-new': 1}\n+ params = { 'cata': 'yes', 'c{}'.format(','.join(str(c) for c in categories)): 1, 'clear-new': 1}\n entries = set()\n for search_string in entry.get('search_strings', [entry['title']]):\n \n- url = 'https://www.torrentday.com/browse.php'\n- params['search'] = normalize_unicode(search_string).replace(':', '')\n+ url = 'https://www.torrentday.com/t'\n+ params['q'] = normalize_unicode(search_string).replace(':', '')\n cookies = { 'uid': config['uid'], 'pass': config['passkey'], '__cfduid': config['cfduid'] }\n \n try:\n page = requests.get(url, params=params, cookies=cookies).content\n except RequestException as e:\n- raise PluginError('Could not connect to torrentday: %s' % e)\n+ raise PluginError('Could not connect to torrentday: {}'.format(e))\n \n soup = get_soup(page)\n-\n- for tr in soup.find_all('tr', { 'class': 'browse' }):\n+ # the first row is the header so skip it\n+ for tr in soup.find_all('tr')[1:]:\n entry = Entry()\n # find the torrent names\n- title = tr.find('a', { 'class': 'torrentName' })\n+ td = tr.find('td', { 'class': 'torrentNameInfo' })\n+ if not td:\n+ log.warning('Could not find entry torrentNameInfo for %s.', search_string)\n+ continue\n+ title = td.find('a')\n+ if not title:\n+ log.warning('Could not determine title for %s.', search_string)\n+ continue\n entry['title'] = title.contents[0]\n log.debug('title: %s', title.contents[0])\n \n # find download link\n- torrent_url = tr.find('td', { 'class': 'dlLinksInfo' })\n+ torrent_url = tr.find('td', { 'class': 'ac' })\n+ if not torrent_url:\n+ log.warning('Could not determine download link for %s.', search_string)\n+ continue\n torrent_url = torrent_url.find('a').get('href')\n \n # construct download URL\n@@ -155,7 +165,8 @@\n entry['url'] = torrent_url\n \n # us tr object for seeders/leechers\n- seeders, leechers = tr.find_all('td', { 'class': ['seedersInfo', 'leechersInfo']})\n+ seeders = tr.find_all('td', { 'class': 'ac seedersInfo'})\n+ leechers = tr.find_all('td', { 'class': 'ac leechersInfo'})\n entry['torrent_seeds'] = int(seeders.contents[0].replace(',', ''))\n entry['torrent_leeches'] = int(leechers.contents[0].replace(',', ''))\n entry['search_sort'] = torrent_availability(entry['torrent_seeds'], entry['torrent_leeches'])\n", "issue": "No results found on torrentday\n### Expected behaviour:\r\nWhen using the torrentday as a source with discover, results should display\r\n\r\n### Actual behaviour:\r\nAlways returns \"No results from torrentday\"\r\n\r\n### Steps to reproduce:\r\nSetup to search as per config\r\n\r\n#### Config:\r\nhttps://pastebin.com/6xWSD9i2\r\n \r\n#### Log:\r\nhttps://pastebin.com/uqa0bDea\r\n\r\n### Additional information:\r\ntorrentday appearts to have updated their browse/search page https://www.torrentday.com/browse.php\n", "before_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # noqa pylint: disable=unused-import, redefined-builtin\nfrom future.moves.urllib.parse import quote\n\nimport re\nimport logging\n\nfrom requests.exceptions import 
RequestException\n\nfrom flexget import plugin\nfrom flexget.config_schema import one_or_more\nfrom flexget.entry import Entry\nfrom flexget.event import event\nfrom flexget.plugin import PluginError\nfrom flexget.plugins.internal.urlrewriting import UrlRewritingError\nfrom flexget.utils import requests\nfrom flexget.utils.soup import get_soup\nfrom flexget.utils.search import torrent_availability, normalize_unicode\nfrom flexget.utils.tools import parse_filesize\n\nlog = logging.getLogger('torrentday')\n\nCATEGORIES = {\n 'all': 0,\n # Movies\n 'mov480p': 25,\n 'movHD': 11,\n 'movBD': 5,\n 'movDVD': 3,\n 'movMP4': 21,\n 'movNonEnglish': 22,\n 'movPACKS': 13,\n 'movSDx264': 44,\n 'movX265': 48,\n 'movXVID': 1,\n\n # TV\n 'tv480p': 24,\n 'tvBRD': 32,\n 'tvDVD': 31,\n 'tvDVDrip': 33,\n 'tvMOBILE': 46,\n 'tvPACKS': 14,\n 'tvSDx264': 26,\n 'tvHDx264': 7,\n 'tvX265': 34,\n 'tvXVID': 2\n}\n\n\nclass UrlRewriteTorrentday(object):\n \"\"\"\n Torrentday urlrewriter and search plugin.\n\n torrentday:\n uid: xxxxxxxxxxxxx (required) NOT YOUR LOGIN. find this in your browser's cookies\n passkey: xxxxxxxxx (required) NOT YOUR PASSWORD. see previous\n cfduid: xxxxxxxxxx (required) AGAIN IN THE COOKIES\n rss_key: xxxxxxxxx (required) get this from your profile page\n category: xxxxxxxx\n\n Category can be one of \n ID from browsing site OR 'name'\n movies:\n mov480p, movHD, movBD, movDVD,\n movMP4, movNonEnglish, movPACKS,\n movSDx264, movX265, movXVID\n tv:\n tv480p, tvBRD, tvDVD, tvDVDrip,\n tvMOBILE, tvPACKS, tvSDx264, \n tvHDx264, tvX265, tvXVID\n \"\"\"\n\n schema = {\n 'type': 'object',\n 'properties': {\n 'rss_key': {'type': 'string'},\n 'uid': {'type': 'string'},\n 'passkey': {'type': 'string'},\n 'cfduid': {'type': 'string'},\n 'category': {\n 'oneOf': [\n {'type': 'integer'},\n {'type': 'string', 'enum': list(CATEGORIES)},\n ]\n },\n },\n 'required': ['rss_key', 'uid', 'passkey', 'cfduid'],\n 'additionalProperties': False\n }\n\n # urlrewriter API\n def url_rewritable(self, task, entry):\n url = entry['url']\n if url.find('.torrent'):\n return False\n if url.startswith('https://www.torrentday.com'):\n return True\n return False\n\n # urlrewriter API\n def url_rewrite(self, task, entry):\n if 'url' not in entry:\n log.error('Didn\\'t actually get a URL...')\n else:\n log.debug('Got the URL: %s', entry['url'])\n if entry['url'].startswith('https://www.torrentday.com/browse'):\n # use search\n results = self.search(task, entry)\n if not results:\n raise UrlRewritingError('No search results found')\n entry['url'] = results[0]['url']\n\n @plugin.internet(log)\n def search(self, task, entry, config=None):\n \"\"\"\n Search for name from torrentday.\n \"\"\"\n\n categories = config.get('category', 'all')\n # Make sure categories is a list\n if not isinstance(categories, list):\n categories = [categories]\n # If there are any text categories, turn them into their id number\n categories = [c if isinstance(c, int) else CATEGORIES[c] for c in categories]\n params = { 'cata': 'yes', 'c%s' % ','.join(str(c) for c in categories): 1, 'clear-new': 1}\n entries = set()\n for search_string in entry.get('search_strings', [entry['title']]):\n\n url = 'https://www.torrentday.com/browse.php'\n params['search'] = normalize_unicode(search_string).replace(':', '')\n cookies = { 'uid': config['uid'], 'pass': config['passkey'], '__cfduid': config['cfduid'] }\n\n try:\n page = requests.get(url, params=params, cookies=cookies).content\n except RequestException as e:\n raise PluginError('Could not connect to torrentday: %s' 
% e)\n\n soup = get_soup(page)\n\n for tr in soup.find_all('tr', { 'class': 'browse' }):\n entry = Entry()\n # find the torrent names\n title = tr.find('a', { 'class': 'torrentName' })\n entry['title'] = title.contents[0]\n log.debug('title: %s', title.contents[0])\n\n # find download link\n torrent_url = tr.find('td', { 'class': 'dlLinksInfo' })\n torrent_url = torrent_url.find('a').get('href')\n\n # construct download URL\n torrent_url = ( 'https://www.torrentday.com/' + torrent_url + '?torrent_pass=' + config['rss_key'] )\n log.debug('RSS-ified download link: %s', torrent_url)\n entry['url'] = torrent_url\n\n # us tr object for seeders/leechers\n seeders, leechers = tr.find_all('td', { 'class': ['seedersInfo', 'leechersInfo']})\n entry['torrent_seeds'] = int(seeders.contents[0].replace(',', ''))\n entry['torrent_leeches'] = int(leechers.contents[0].replace(',', ''))\n entry['search_sort'] = torrent_availability(entry['torrent_seeds'], entry['torrent_leeches'])\n\n # use tr object for size\n size = tr.find('td', text=re.compile('([\\.\\d]+) ([TGMKk]?)B')).contents[0]\n size = re.search('([\\.\\d]+) ([TGMKk]?)B', str(size))\n\n entry['content_size'] = parse_filesize(size.group(0))\n\n entries.add(entry)\n\n return sorted(entries, reverse=True, key=lambda x: x.get('search_sort'))\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(UrlRewriteTorrentday, 'torrentday', interfaces=['urlrewriter', 'search'], api_ver=2)\n", "path": "flexget/plugins/sites/torrentday.py"}]} | 2,705 | 818 |
gh_patches_debug_19265 | rasdani/github-patches | git_diff | ipython__ipython-1935 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pyreadline version dependency not correctly checked
Installing IPython on windows with `python setup.py install` and pyreadline 1.5:
<pre>
C:\code\dev_trees\ipython [main-master]> ipython
Python 2.6.5 (r265:79096, Mar 19 2010, 21:48:26) [MSC v.1500 32 bit (Intel)]
Type "copyright", "credits" or "license" for more information.
IPython 0.13.dev -- An enhanced Interactive Python.
? -> Introduction and overview of IPython's features.
%quickref -> Quick reference.
help -> Python's own help system.
object? -> Details about 'object', use 'object??' for extra details.
Traceback (most recent call last):
File "C:\Python26\Scripts\ipython-script.py", line 9, in <module>
load_entry_point('ipython==0.13.dev', 'console_scripts', 'ipython')()
File "C:\Python26\lib\site-packages\ipython-0.13.dev-py2.6.egg\IPython\frontend\terminal\ipapp.py", line 409, in launch_new_instance
app.start()
File "C:\Python26\lib\site-packages\ipython-0.13.dev-py2.6.egg\IPython\frontend\terminal\ipapp.py", line 383, in start
self.shell.mainloop()
File "C:\Python26\lib\site-packages\ipython-0.13.dev-py2.6.egg\IPython\frontend\terminal\interactiveshell.py", line 290, in mainloop
self.interact(display_banner=display_banner)
File "C:\Python26\lib\site-packages\ipython-0.13.dev-py2.6.egg\IPython\frontend\terminal\interactiveshell.py", line 346, in interact
hlen_b4_cell = self.readline.get_current_history_length()
AttributeError: 'module' object has no attribute 'get_current_history_length'
</pre>
I see that `setup.py` `requires` pyreadline >= 1.7.1, iff `setupext.check_for_readline()` returns False. However, in my case, it returns True because the function does not check the version, and I have version 1.5. I wasn't sure how best to put the version dependency into the function.
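
Perhaps something along these lines would work, comparing `pyreadline.release.version` (which the current check already reads) against the required version with `distutils.version.LooseVersion`. Just an untested sketch:

```python
from distutils.version import LooseVersion

try:
    import pyreadline
    # pyreadline exposes its version string here, e.g. "1.5" or "1.7.1"
    vs = pyreadline.release.version
except (ImportError, AttributeError):
    vs = None

# treat anything older than 1.7.1 as "not found" so setup.py pulls in the newer release
have_pyreadline = vs is not None and LooseVersion(vs) >= LooseVersion("1.7.1")
print(have_pyreadline)
```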
</issue>
<code>
[start of setupext/setupext.py]
1 # encoding: utf-8
2 from __future__ import print_function
3
4 __docformat__ = "restructuredtext en"
5
6 #-------------------------------------------------------------------------------
7 # Copyright (C) 2008 The IPython Development Team
8 #
9 # Distributed under the terms of the BSD License. The full license is in
10 # the file COPYING, distributed as part of this software.
11 #-------------------------------------------------------------------------------
12
13 #-------------------------------------------------------------------------------
14 # Imports
15 #-------------------------------------------------------------------------------
16
17 import sys, os
18 from textwrap import fill
19
20 display_status=True
21
22 def check_display(f):
23 """decorator to allow display methods to be muted by mod.display_status"""
24 def maybe_display(*args, **kwargs):
25 if display_status:
26 return f(*args, **kwargs)
27 return maybe_display
28
29 @check_display
30 def print_line(char='='):
31 print(char * 76)
32
33 @check_display
34 def print_status(package, status):
35 initial_indent = "%22s: " % package
36 indent = ' ' * 24
37 print(fill(str(status), width=76,
38 initial_indent=initial_indent,
39 subsequent_indent=indent))
40
41 @check_display
42 def print_message(message):
43 indent = ' ' * 24 + "* "
44 print(fill(str(message), width=76,
45 initial_indent=indent,
46 subsequent_indent=indent))
47
48 @check_display
49 def print_raw(section):
50 print(section)
51
52 #-------------------------------------------------------------------------------
53 # Tests for specific packages
54 #-------------------------------------------------------------------------------
55
56 def check_for_ipython():
57 try:
58 import IPython
59 except ImportError:
60 print_status("IPython", "Not found")
61 return False
62 else:
63 print_status("IPython", IPython.__version__)
64 return True
65
66 def check_for_sphinx():
67 try:
68 import sphinx
69 except ImportError:
70 print_status('sphinx', "Not found (required for building documentation)")
71 return False
72 else:
73 print_status('sphinx', sphinx.__version__)
74 return True
75
76 def check_for_pygments():
77 try:
78 import pygments
79 except ImportError:
80 print_status('pygments', "Not found (required for syntax highlighting documentation)")
81 return False
82 else:
83 print_status('pygments', pygments.__version__)
84 return True
85
86 def check_for_nose():
87 try:
88 import nose
89 except ImportError:
90 print_status('nose', "Not found (required for running the test suite)")
91 return False
92 else:
93 print_status('nose', nose.__version__)
94 return True
95
96 def check_for_pexpect():
97 try:
98 import pexpect
99 except ImportError:
100 print_status("pexpect", "no (required for running standalone doctests)")
101 return False
102 else:
103 print_status("pexpect", pexpect.__version__)
104 return True
105
106 def check_for_httplib2():
107 try:
108 import httplib2
109 except ImportError:
110 print_status("httplib2", "no (required for blocking http clients)")
111 return False
112 else:
113 print_status("httplib2","yes")
114 return True
115
116 def check_for_sqlalchemy():
117 try:
118 import sqlalchemy
119 except ImportError:
120 print_status("sqlalchemy", "no (required for the ipython1 notebook)")
121 return False
122 else:
123 print_status("sqlalchemy","yes")
124 return True
125
126 def check_for_simplejson():
127 try:
128 import simplejson
129 except ImportError:
130 print_status("simplejson", "no (required for the ipython1 notebook)")
131 return False
132 else:
133 print_status("simplejson","yes")
134 return True
135
136 def check_for_pyzmq():
137 try:
138 import zmq
139 except ImportError:
140 print_status('pyzmq', "no (required for qtconsole, notebook, and parallel computing capabilities)")
141 return False
142 else:
143 # pyzmq 2.1.10 adds pyzmq_version_info funtion for returning
144 # version as a tuple
145 if hasattr(zmq, 'pyzmq_version_info'):
146 if zmq.pyzmq_version_info() >= (2,1,4):
147 print_status("pyzmq", zmq.__version__)
148 return True
149 else:
150 # this branch can never occur, at least until we update our
151 # pyzmq dependency beyond 2.1.10
152 return False
153 # this is necessarily earlier than 2.1.10, so string comparison is
154 # okay
155 if zmq.__version__ < '2.1.4':
156 print_status('pyzmq', "no (have %s, but require >= 2.1.4 for"
157 " qtconsole and parallel computing capabilities)"%zmq.__version__)
158 return False
159 else:
160 print_status("pyzmq", zmq.__version__)
161 return True
162
163 def check_for_readline():
164 try:
165 import readline
166 except ImportError:
167 try:
168 import pyreadline
169 except ImportError:
170 print_status('readline', "no (required for good interactive behavior)")
171 return False
172 else:
173 print_status('readline', "yes pyreadline-"+pyreadline.release.version)
174 return True
175 else:
176 print_status('readline', "yes")
177 return True
178
[end of setupext/setupext.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setupext/setupext.py b/setupext/setupext.py
--- a/setupext/setupext.py
+++ b/setupext/setupext.py
@@ -161,17 +161,22 @@
return True
def check_for_readline():
+ from distutils.version import LooseVersion
try:
import readline
except ImportError:
try:
import pyreadline
- except ImportError:
+ vs = pyreadline.release.version
+ except (ImportError, AttributeError):
print_status('readline', "no (required for good interactive behavior)")
return False
- else:
- print_status('readline', "yes pyreadline-"+pyreadline.release.version)
+ if LooseVersion(vs).version >= [1,7,1]:
+ print_status('readline', "yes pyreadline-" + vs)
return True
+ else:
+ print_status('readline', "no pyreadline-%s < 1.7.1" % vs)
+ return False
else:
print_status('readline', "yes")
return True
| {"golden_diff": "diff --git a/setupext/setupext.py b/setupext/setupext.py\n--- a/setupext/setupext.py\n+++ b/setupext/setupext.py\n@@ -161,17 +161,22 @@\n return True\n \n def check_for_readline():\n+ from distutils.version import LooseVersion\n try:\n import readline\n except ImportError:\n try:\n import pyreadline\n- except ImportError:\n+ vs = pyreadline.release.version\n+ except (ImportError, AttributeError):\n print_status('readline', \"no (required for good interactive behavior)\")\n return False\n- else:\n- print_status('readline', \"yes pyreadline-\"+pyreadline.release.version)\n+ if LooseVersion(vs).version >= [1,7,1]:\n+ print_status('readline', \"yes pyreadline-\" + vs)\n return True\n+ else:\n+ print_status('readline', \"no pyreadline-%s < 1.7.1\" % vs)\n+ return False\n else:\n print_status('readline', \"yes\")\n return True\n", "issue": "pyreadline version dependency not correctly checked\nInstalling IPython on windows with `python setup.py install` and pyreadline 1.5:\n\n<pre>\nC:\\code\\dev_trees\\ipython [main-master]> ipython\nPython 2.6.5 (r265:79096, Mar 19 2010, 21:48:26) [MSC v.1500 32 bit (Intel)]\nType \"copyright\", \"credits\" or \"license\" for more information.\n\nIPython 0.13.dev -- An enhanced Interactive Python.\n? -> Introduction and overview of IPython's features.\n%quickref -> Quick reference.\nhelp -> Python's own help system.\nobject? -> Details about 'object', use 'object??' for extra details.\nTraceback (most recent call last):\n File \"C:\\Python26\\Scripts\\ipython-script.py\", line 9, in <module>\n load_entry_point('ipython==0.13.dev', 'console_scripts', 'ipython')()\n File \"C:\\Python26\\lib\\site-packages\\ipython-0.13.dev-py2.6.egg\\IPython\\frontend\\terminal\\ipapp.py\", line 409, in launch_new_instance\n app.start()\n File \"C:\\Python26\\lib\\site-packages\\ipython-0.13.dev-py2.6.egg\\IPython\\frontend\\terminal\\ipapp.py\", line 383, in start\n self.shell.mainloop()\n File \"C:\\Python26\\lib\\site-packages\\ipython-0.13.dev-py2.6.egg\\IPython\\frontend\\terminal\\interactiveshell.py\", line 290, in mainloop\n self.interact(display_banner=display_banner)\n File \"C:\\Python26\\lib\\site-packages\\ipython-0.13.dev-py2.6.egg\\IPython\\frontend\\terminal\\interactiveshell.py\", line 346, in interact\n hlen_b4_cell = self.readline.get_current_history_length()\nAttributeError: 'module' object has no attribute 'get_current_history_length'\n</pre>\n\n\nI see that `setup.py` `requires` pyreadline >= 1.7.1, iff `setupext.check_for_readline()` returns False. However, in my case, it returns True because the function does not check the version, and I have version 1.5. I wasn't sure how best to put the version dependency into the function.\n\n", "before_files": [{"content": "# encoding: utf-8\nfrom __future__ import print_function\n\n__docformat__ = \"restructuredtext en\"\n\n#-------------------------------------------------------------------------------\n# Copyright (C) 2008 The IPython Development Team\n#\n# Distributed under the terms of the BSD License. 
The full license is in\n# the file COPYING, distributed as part of this software.\n#-------------------------------------------------------------------------------\n\n#-------------------------------------------------------------------------------\n# Imports\n#-------------------------------------------------------------------------------\n\nimport sys, os\nfrom textwrap import fill\n\ndisplay_status=True\n\ndef check_display(f):\n \"\"\"decorator to allow display methods to be muted by mod.display_status\"\"\"\n def maybe_display(*args, **kwargs):\n if display_status:\n return f(*args, **kwargs)\n return maybe_display\n\n@check_display\ndef print_line(char='='):\n print(char * 76)\n\n@check_display\ndef print_status(package, status):\n initial_indent = \"%22s: \" % package\n indent = ' ' * 24\n print(fill(str(status), width=76,\n initial_indent=initial_indent,\n subsequent_indent=indent))\n\n@check_display\ndef print_message(message):\n indent = ' ' * 24 + \"* \"\n print(fill(str(message), width=76,\n initial_indent=indent,\n subsequent_indent=indent))\n\n@check_display\ndef print_raw(section):\n print(section)\n\n#-------------------------------------------------------------------------------\n# Tests for specific packages\n#-------------------------------------------------------------------------------\n\ndef check_for_ipython():\n try:\n import IPython\n except ImportError:\n print_status(\"IPython\", \"Not found\")\n return False\n else:\n print_status(\"IPython\", IPython.__version__)\n return True\n\ndef check_for_sphinx():\n try:\n import sphinx\n except ImportError:\n print_status('sphinx', \"Not found (required for building documentation)\")\n return False\n else:\n print_status('sphinx', sphinx.__version__)\n return True\n\ndef check_for_pygments():\n try:\n import pygments\n except ImportError:\n print_status('pygments', \"Not found (required for syntax highlighting documentation)\")\n return False\n else:\n print_status('pygments', pygments.__version__)\n return True\n\ndef check_for_nose():\n try:\n import nose\n except ImportError:\n print_status('nose', \"Not found (required for running the test suite)\")\n return False\n else:\n print_status('nose', nose.__version__)\n return True\n\ndef check_for_pexpect():\n try:\n import pexpect\n except ImportError:\n print_status(\"pexpect\", \"no (required for running standalone doctests)\")\n return False\n else:\n print_status(\"pexpect\", pexpect.__version__)\n return True\n\ndef check_for_httplib2():\n try:\n import httplib2\n except ImportError:\n print_status(\"httplib2\", \"no (required for blocking http clients)\")\n return False\n else:\n print_status(\"httplib2\",\"yes\")\n return True\n\ndef check_for_sqlalchemy():\n try:\n import sqlalchemy\n except ImportError:\n print_status(\"sqlalchemy\", \"no (required for the ipython1 notebook)\")\n return False\n else:\n print_status(\"sqlalchemy\",\"yes\")\n return True\n\ndef check_for_simplejson():\n try:\n import simplejson\n except ImportError:\n print_status(\"simplejson\", \"no (required for the ipython1 notebook)\")\n return False\n else:\n print_status(\"simplejson\",\"yes\")\n return True\n\ndef check_for_pyzmq():\n try:\n import zmq\n except ImportError:\n print_status('pyzmq', \"no (required for qtconsole, notebook, and parallel computing capabilities)\")\n return False\n else:\n # pyzmq 2.1.10 adds pyzmq_version_info funtion for returning\n # version as a tuple\n if hasattr(zmq, 'pyzmq_version_info'):\n if zmq.pyzmq_version_info() >= (2,1,4):\n print_status(\"pyzmq\", 
zmq.__version__)\n return True\n else:\n # this branch can never occur, at least until we update our\n # pyzmq dependency beyond 2.1.10\n return False\n # this is necessarily earlier than 2.1.10, so string comparison is\n # okay\n if zmq.__version__ < '2.1.4':\n print_status('pyzmq', \"no (have %s, but require >= 2.1.4 for\"\n \" qtconsole and parallel computing capabilities)\"%zmq.__version__)\n return False\n else:\n print_status(\"pyzmq\", zmq.__version__)\n return True\n\ndef check_for_readline():\n try:\n import readline\n except ImportError:\n try:\n import pyreadline\n except ImportError:\n print_status('readline', \"no (required for good interactive behavior)\")\n return False\n else:\n print_status('readline', \"yes pyreadline-\"+pyreadline.release.version)\n return True\n else:\n print_status('readline', \"yes\")\n return True\n", "path": "setupext/setupext.py"}]} | 2,660 | 245 |
gh_patches_debug_18353 | rasdani/github-patches | git_diff | PennyLaneAI__pennylane-3232 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CV gradients - Heisenberg derivatives
Hey everyone. Maybe we can move some of the discussions out of the paper and to here for now? At least as far as writing equations allows...
Christian, your comment on passing the derivative past a non-Gaussian circuit in the Heisenberg and "circuit" pictures is very useful. I have not even thought about the case of U being non-Gaussian and something other than homodyne detection; I was more worried about \hat{O} = \hat{x} and V being non-Gaussian. Would be interested to know if the same thing appears.
This is a really cool lesson in thorough Heisenberg calculus!
</issue>
<code>
[start of pennylane/transforms/hamiltonian_expand.py]
1 # Copyright 2018-2021 Xanadu Quantum Technologies Inc.
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """
15 Contains the hamiltonian expand tape transform
16 """
17 # pylint: disable=protected-access
18 import pennylane as qml
19
20
21 def hamiltonian_expand(tape, group=True):
22 r"""
23 Splits a tape measuring a Hamiltonian expectation into mutliple tapes of Pauli expectations,
24 and provides a function to recombine the results.
25
26 Args:
27 tape (.QuantumTape): the tape used when calculating the expectation value
28 of the Hamiltonian
29 group (bool): Whether to compute disjoint groups of commuting Pauli observables, leading to fewer tapes.
30 If grouping information can be found in the Hamiltonian, it will be used even if group=False.
31
32 Returns:
33 tuple[list[.QuantumTape], function]: Returns a tuple containing a list of
34 quantum tapes to be evaluated, and a function to be applied to these
35 tape executions to compute the expectation value.
36
37 **Example**
38
39 Given a Hamiltonian,
40
41 .. code-block:: python3
42
43 H = qml.PauliY(2) @ qml.PauliZ(1) + 0.5 * qml.PauliZ(2) + qml.PauliZ(1)
44
45 and a tape of the form,
46
47 .. code-block:: python3
48
49 with qml.tape.QuantumTape() as tape:
50 qml.Hadamard(wires=0)
51 qml.CNOT(wires=[0, 1])
52 qml.PauliX(wires=2)
53
54 qml.expval(H)
55
56 We can use the ``hamiltonian_expand`` transform to generate new tapes and a classical
57 post-processing function for computing the expectation value of the Hamiltonian.
58
59 >>> tapes, fn = qml.transforms.hamiltonian_expand(tape)
60
61 We can evaluate these tapes on a device:
62
63 >>> dev = qml.device("default.qubit", wires=3)
64 >>> res = dev.batch_execute(tapes)
65
66 Applying the processing function results in the expectation value of the Hamiltonian:
67
68 >>> fn(res)
69 -0.5
70
71 Fewer tapes can be constructed by grouping commuting observables. This can be achieved
72 by the ``group`` keyword argument:
73
74 .. code-block:: python3
75
76 H = qml.Hamiltonian([1., 2., 3.], [qml.PauliZ(0), qml.PauliX(1), qml.PauliX(0)])
77
78 with qml.tape.QuantumTape() as tape:
79 qml.Hadamard(wires=0)
80 qml.CNOT(wires=[0, 1])
81 qml.PauliX(wires=2)
82 qml.expval(H)
83
84 With grouping, the Hamiltonian gets split into two groups of observables (here ``[qml.PauliZ(0)]`` and
85 ``[qml.PauliX(1), qml.PauliX(0)]``):
86
87 >>> tapes, fn = qml.transforms.hamiltonian_expand(tape)
88 >>> len(tapes)
89 2
90
91 Without grouping it gets split into three groups (``[qml.PauliZ(0)]``, ``[qml.PauliX(1)]`` and ``[qml.PauliX(0)]``):
92
93 >>> tapes, fn = qml.transforms.hamiltonian_expand(tape, group=False)
94 >>> len(tapes)
95 3
96
97 Alternatively, if the Hamiltonian has already computed groups, they are used even if ``group=False``:
98
99 .. code-block:: python3
100
101 obs = [qml.PauliZ(0), qml.PauliX(1), qml.PauliX(0)]
102 coeffs = [1., 2., 3.]
103 H = qml.Hamiltonian(coeffs, obs, grouping_type='qwc')
104
105 # the initialisation already computes grouping information and stores it in the Hamiltonian
106 assert H.grouping_indices is not None
107
108 with qml.tape.QuantumTape() as tape:
109 qml.Hadamard(wires=0)
110 qml.CNOT(wires=[0, 1])
111 qml.PauliX(wires=2)
112 qml.expval(H)
113
114 Grouping information has been used to reduce the number of tapes from 3 to 2:
115
116 >>> tapes, fn = qml.transforms.hamiltonian_expand(tape, group=False)
117 >>> len(tapes)
118 2
119 """
120
121 hamiltonian = tape.measurements[0].obs
122
123 if (
124 not isinstance(hamiltonian, qml.Hamiltonian)
125 or len(tape.measurements) > 1
126 or tape.measurements[0].return_type != qml.measurements.Expectation
127 ):
128 raise ValueError(
129 "Passed tape must end in `qml.expval(H)`, where H is of type `qml.Hamiltonian`"
130 )
131
132 # note: for backward passes of some frameworks
133 # it is crucial to use the hamiltonian.data attribute,
134 # and not hamiltonian.coeffs when recombining the results
135
136 if group or hamiltonian.grouping_indices is not None:
137
138 if hamiltonian.grouping_indices is None:
139 # explicitly selected grouping, but indices not yet computed
140 hamiltonian.compute_grouping()
141
142 coeff_groupings = [
143 qml.math.stack([hamiltonian.data[i] for i in indices])
144 for indices in hamiltonian.grouping_indices
145 ]
146 obs_groupings = [
147 [hamiltonian.ops[i] for i in indices] for indices in hamiltonian.grouping_indices
148 ]
149
150 # make one tape per grouping, measuring the
151 # observables in that grouping
152 tapes = []
153 for obs in obs_groupings:
154 new_tape = tape.__class__(tape._ops, (qml.expval(o) for o in obs), tape._prep)
155
156 new_tape = new_tape.expand(stop_at=lambda obj: True)
157 tapes.append(new_tape)
158
159 def processing_fn(res_groupings):
160 dot_products = [
161 qml.math.dot(r_group, c_group)
162 for c_group, r_group in zip(coeff_groupings, res_groupings)
163 ]
164 return qml.math.sum(qml.math.stack(dot_products), axis=0)
165
166 return tapes, processing_fn
167
168 coeffs = hamiltonian.data
169
170 # make one tape per observable
171 tapes = []
172 for o in hamiltonian.ops:
173 # pylint: disable=protected-access
174 new_tape = tape.__class__(tape._ops, [qml.expval(o)], tape._prep)
175 tapes.append(new_tape)
176
177 # pylint: disable=function-redefined
178 def processing_fn(res):
179 dot_products = [qml.math.dot(qml.math.squeeze(r), c) for c, r in zip(coeffs, res)]
180 return qml.math.sum(qml.math.stack(dot_products), axis=0)
181
182 return tapes, processing_fn
183
[end of pennylane/transforms/hamiltonian_expand.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pennylane/transforms/hamiltonian_expand.py b/pennylane/transforms/hamiltonian_expand.py
--- a/pennylane/transforms/hamiltonian_expand.py
+++ b/pennylane/transforms/hamiltonian_expand.py
@@ -157,10 +157,21 @@
tapes.append(new_tape)
def processing_fn(res_groupings):
- dot_products = [
- qml.math.dot(r_group, c_group)
- for c_group, r_group in zip(coeff_groupings, res_groupings)
- ]
+ if qml.active_return():
+ dot_products = [
+ qml.math.dot(
+ qml.math.reshape(
+ qml.math.convert_like(r_group, c_group), qml.math.shape(c_group)
+ ),
+ c_group,
+ )
+ for c_group, r_group in zip(coeff_groupings, res_groupings)
+ ]
+ else:
+ dot_products = [
+ qml.math.dot(r_group, c_group)
+ for c_group, r_group in zip(coeff_groupings, res_groupings)
+ ]
return qml.math.sum(qml.math.stack(dot_products), axis=0)
return tapes, processing_fn
| {"golden_diff": "diff --git a/pennylane/transforms/hamiltonian_expand.py b/pennylane/transforms/hamiltonian_expand.py\n--- a/pennylane/transforms/hamiltonian_expand.py\n+++ b/pennylane/transforms/hamiltonian_expand.py\n@@ -157,10 +157,21 @@\n tapes.append(new_tape)\n \n def processing_fn(res_groupings):\n- dot_products = [\n- qml.math.dot(r_group, c_group)\n- for c_group, r_group in zip(coeff_groupings, res_groupings)\n- ]\n+ if qml.active_return():\n+ dot_products = [\n+ qml.math.dot(\n+ qml.math.reshape(\n+ qml.math.convert_like(r_group, c_group), qml.math.shape(c_group)\n+ ),\n+ c_group,\n+ )\n+ for c_group, r_group in zip(coeff_groupings, res_groupings)\n+ ]\n+ else:\n+ dot_products = [\n+ qml.math.dot(r_group, c_group)\n+ for c_group, r_group in zip(coeff_groupings, res_groupings)\n+ ]\n return qml.math.sum(qml.math.stack(dot_products), axis=0)\n \n return tapes, processing_fn\n", "issue": "CV gradients - Heisenberg derivatives\nHey everyone. Maybe we can move some of the discussions out of the paper and to here for now? At least as far as writing equations allows...\r\n\r\nChristian, your comment on passing the derivative past a non-Gaussian circuit in the Heisenberg and \"circuit\" picture is very useful. I have not even thought about the case of U being non-Gaussian and something else than homodyne detection, I was more worried about and \\hat{O} = \\hat{x} and V being non-Gaussian. Would be interested to know if the same thing appears.\r\n\r\nThis is a really cool lesson in thorough Heisenberg calculus!\n", "before_files": [{"content": "# Copyright 2018-2021 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nContains the hamiltonian expand tape transform\n\"\"\"\n# pylint: disable=protected-access\nimport pennylane as qml\n\n\ndef hamiltonian_expand(tape, group=True):\n r\"\"\"\n Splits a tape measuring a Hamiltonian expectation into mutliple tapes of Pauli expectations,\n and provides a function to recombine the results.\n\n Args:\n tape (.QuantumTape): the tape used when calculating the expectation value\n of the Hamiltonian\n group (bool): Whether to compute disjoint groups of commuting Pauli observables, leading to fewer tapes.\n If grouping information can be found in the Hamiltonian, it will be used even if group=False.\n\n Returns:\n tuple[list[.QuantumTape], function]: Returns a tuple containing a list of\n quantum tapes to be evaluated, and a function to be applied to these\n tape executions to compute the expectation value.\n\n **Example**\n\n Given a Hamiltonian,\n\n .. code-block:: python3\n\n H = qml.PauliY(2) @ qml.PauliZ(1) + 0.5 * qml.PauliZ(2) + qml.PauliZ(1)\n\n and a tape of the form,\n\n .. 
code-block:: python3\n\n with qml.tape.QuantumTape() as tape:\n qml.Hadamard(wires=0)\n qml.CNOT(wires=[0, 1])\n qml.PauliX(wires=2)\n\n qml.expval(H)\n\n We can use the ``hamiltonian_expand`` transform to generate new tapes and a classical\n post-processing function for computing the expectation value of the Hamiltonian.\n\n >>> tapes, fn = qml.transforms.hamiltonian_expand(tape)\n\n We can evaluate these tapes on a device:\n\n >>> dev = qml.device(\"default.qubit\", wires=3)\n >>> res = dev.batch_execute(tapes)\n\n Applying the processing function results in the expectation value of the Hamiltonian:\n\n >>> fn(res)\n -0.5\n\n Fewer tapes can be constructed by grouping commuting observables. This can be achieved\n by the ``group`` keyword argument:\n\n .. code-block:: python3\n\n H = qml.Hamiltonian([1., 2., 3.], [qml.PauliZ(0), qml.PauliX(1), qml.PauliX(0)])\n\n with qml.tape.QuantumTape() as tape:\n qml.Hadamard(wires=0)\n qml.CNOT(wires=[0, 1])\n qml.PauliX(wires=2)\n qml.expval(H)\n\n With grouping, the Hamiltonian gets split into two groups of observables (here ``[qml.PauliZ(0)]`` and\n ``[qml.PauliX(1), qml.PauliX(0)]``):\n\n >>> tapes, fn = qml.transforms.hamiltonian_expand(tape)\n >>> len(tapes)\n 2\n\n Without grouping it gets split into three groups (``[qml.PauliZ(0)]``, ``[qml.PauliX(1)]`` and ``[qml.PauliX(0)]``):\n\n >>> tapes, fn = qml.transforms.hamiltonian_expand(tape, group=False)\n >>> len(tapes)\n 3\n\n Alternatively, if the Hamiltonian has already computed groups, they are used even if ``group=False``:\n\n .. code-block:: python3\n\n obs = [qml.PauliZ(0), qml.PauliX(1), qml.PauliX(0)]\n coeffs = [1., 2., 3.]\n H = qml.Hamiltonian(coeffs, obs, grouping_type='qwc')\n\n # the initialisation already computes grouping information and stores it in the Hamiltonian\n assert H.grouping_indices is not None\n\n with qml.tape.QuantumTape() as tape:\n qml.Hadamard(wires=0)\n qml.CNOT(wires=[0, 1])\n qml.PauliX(wires=2)\n qml.expval(H)\n\n Grouping information has been used to reduce the number of tapes from 3 to 2:\n\n >>> tapes, fn = qml.transforms.hamiltonian_expand(tape, group=False)\n >>> len(tapes)\n 2\n \"\"\"\n\n hamiltonian = tape.measurements[0].obs\n\n if (\n not isinstance(hamiltonian, qml.Hamiltonian)\n or len(tape.measurements) > 1\n or tape.measurements[0].return_type != qml.measurements.Expectation\n ):\n raise ValueError(\n \"Passed tape must end in `qml.expval(H)`, where H is of type `qml.Hamiltonian`\"\n )\n\n # note: for backward passes of some frameworks\n # it is crucial to use the hamiltonian.data attribute,\n # and not hamiltonian.coeffs when recombining the results\n\n if group or hamiltonian.grouping_indices is not None:\n\n if hamiltonian.grouping_indices is None:\n # explicitly selected grouping, but indices not yet computed\n hamiltonian.compute_grouping()\n\n coeff_groupings = [\n qml.math.stack([hamiltonian.data[i] for i in indices])\n for indices in hamiltonian.grouping_indices\n ]\n obs_groupings = [\n [hamiltonian.ops[i] for i in indices] for indices in hamiltonian.grouping_indices\n ]\n\n # make one tape per grouping, measuring the\n # observables in that grouping\n tapes = []\n for obs in obs_groupings:\n new_tape = tape.__class__(tape._ops, (qml.expval(o) for o in obs), tape._prep)\n\n new_tape = new_tape.expand(stop_at=lambda obj: True)\n tapes.append(new_tape)\n\n def processing_fn(res_groupings):\n dot_products = [\n qml.math.dot(r_group, c_group)\n for c_group, r_group in zip(coeff_groupings, res_groupings)\n ]\n return 
qml.math.sum(qml.math.stack(dot_products), axis=0)\n\n return tapes, processing_fn\n\n coeffs = hamiltonian.data\n\n # make one tape per observable\n tapes = []\n for o in hamiltonian.ops:\n # pylint: disable=protected-access\n new_tape = tape.__class__(tape._ops, [qml.expval(o)], tape._prep)\n tapes.append(new_tape)\n\n # pylint: disable=function-redefined\n def processing_fn(res):\n dot_products = [qml.math.dot(qml.math.squeeze(r), c) for c, r in zip(coeffs, res)]\n return qml.math.sum(qml.math.stack(dot_products), axis=0)\n\n return tapes, processing_fn\n", "path": "pennylane/transforms/hamiltonian_expand.py"}]} | 2,803 | 279 |
gh_patches_debug_34007 | rasdani/github-patches | git_diff | microsoft__botbuilder-python-824 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[PORT] BotAdapter changes (made for custom adapter / skills compat)
> Port this change from botbuilder-dotnet/master branch:
https://github.com/microsoft/botbuilder-dotnet/pull/3148
#3147 - Added additional ContinueConversationAsync overload for custom adapters for skills compatibility.
#3146 - Move 'BotIdentityKey' const to BotAdapter and made it public.
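
A rough sketch of what the ported Python-side surface could look like; the names and signature below are assumptions carried over from the .NET PR, not the final API:

```python
from abc import ABC
from typing import Callable

from botbuilder.schema import ConversationReference
from botframework.connector.auth import ClaimsIdentity


class BotAdapter(ABC):
    # assumed name for the ported constant (made public on the base adapter, per #3146)
    BOT_IDENTITY_KEY = "BotIdentity"

    async def continue_conversation(
        self,
        reference: ConversationReference,
        callback: Callable,
        bot_id: str = None,
        # assumed extra parameter mirroring the new .NET overload (#3147), so custom
        # adapters can resume conversations with the claims needed for skill calls
        claims_identity: ClaimsIdentity = None,
    ):
        raise NotImplementedError()
```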
</issue>
<code>
[start of libraries/botbuilder-adapters-slack/botbuilder/adapters/slack/slack_adapter.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 from abc import ABC
5 from typing import List, Callable, Awaitable
6
7 from aiohttp.web_request import Request
8 from aiohttp.web_response import Response
9 from botframework.connector.auth import ClaimsIdentity
10 from botbuilder.core import conversation_reference_extension
11 from botbuilder.core import BotAdapter, TurnContext
12 from botbuilder.schema import (
13 Activity,
14 ResourceResponse,
15 ActivityTypes,
16 ConversationAccount,
17 ConversationReference,
18 )
19
20 from .activity_resourceresponse import ActivityResourceResponse
21 from .slack_client import SlackClient
22 from .slack_helper import SlackHelper
23
24
25 class SlackAdapter(BotAdapter, ABC):
26 """
27 BotAdapter that can handle incoming slack events. Incoming slack events are deserialized to an Activity
28 that is dispatch through the middleware and bot pipeline.
29 """
30
31 def __init__(
32 self,
33 client: SlackClient,
34 on_turn_error: Callable[[TurnContext, Exception], Awaitable] = None,
35 ):
36 super().__init__(on_turn_error)
37 self.slack_client = client
38 self.slack_logged_in = False
39
40 async def send_activities(
41 self, context: TurnContext, activities: List[Activity]
42 ) -> List[ResourceResponse]:
43 """
44 Standard BotBuilder adapter method to send a message from the bot to the messaging API.
45
46 :param context: A TurnContext representing the current incoming message and environment.
47 :param activities: An array of outgoing activities to be sent back to the messaging API.
48 :return: An array of ResourceResponse objects containing the IDs that Slack assigned to the sent messages.
49 """
50
51 if not context:
52 raise Exception("TurnContext is required")
53 if not activities:
54 raise Exception("List[Activity] is required")
55
56 responses = []
57
58 for activity in activities:
59 if activity.type == ActivityTypes.message:
60 message = SlackHelper.activity_to_slack(activity)
61
62 slack_response = await self.slack_client.post_message_to_slack(message)
63
64 if slack_response and slack_response.status_code / 100 == 2:
65 resource_response = ActivityResourceResponse(
66 id=slack_response.data["ts"],
67 activity_id=slack_response.data["ts"],
68 conversation=ConversationAccount(
69 id=slack_response.data["channel"]
70 ),
71 )
72
73 responses.append(resource_response)
74
75 return responses
76
77 async def update_activity(self, context: TurnContext, activity: Activity):
78 """
79 Standard BotBuilder adapter method to update a previous message with new content.
80
81 :param context: A TurnContext representing the current incoming message and environment.
82 :param activity: The updated activity in the form '{id: `id of activity to update`, ...}'.
83 :return: A resource response with the Id of the updated activity.
84 """
85
86 if not context:
87 raise Exception("TurnContext is required")
88 if not activity:
89 raise Exception("Activity is required")
90 if not activity.id:
91 raise Exception("Activity.id is required")
92 if not activity.conversation:
93 raise Exception("Activity.conversation is required")
94
95 message = SlackHelper.activity_to_slack(activity)
96 results = await self.slack_client.update(
97 timestamp=message.ts, channel_id=message.channel, text=message.text,
98 )
99
100 if results.status_code / 100 != 2:
101 raise Exception(f"Error updating activity on slack: {results}")
102
103 return ResourceResponse(id=activity.id)
104
105 async def delete_activity(
106 self, context: TurnContext, reference: ConversationReference
107 ):
108 """
109 Standard BotBuilder adapter method to delete a previous message.
110
111 :param context: A TurnContext representing the current incoming message and environment.
112 :param reference: An object in the form "{activityId: `id of message to delete`,
113 conversation: { id: `id of slack channel`}}".
114 """
115
116 if not context:
117 raise Exception("TurnContext is required")
118 if not reference:
119 raise Exception("ConversationReference is required")
120 if not reference.channel_id:
121 raise Exception("ConversationReference.channel_id is required")
122 if not context.activity.timestamp:
123 raise Exception("Activity.timestamp is required")
124
125 await self.slack_client.delete_message(
126 channel_id=reference.channel_id, timestamp=context.activity.timestamp
127 )
128
129 async def continue_conversation(
130 self,
131 reference: ConversationReference,
132 callback: Callable,
133 bot_id: str = None,
134 claims_identity: ClaimsIdentity = None,
135 audience: str = None,
136 ):
137 """
138 Sends a proactive message to a conversation. Call this method to proactively send a message to a conversation.
139 Most _channels require a user to initiate a conversation with a bot before the bot can send activities
140 to the user.
141 :param bot_id: The application ID of the bot. This parameter is ignored in
142 single tenant the Adpters (Console, Test, etc) but is critical to the BotFrameworkAdapter
143 which is multi-tenant aware. </param>
144 :param reference: A reference to the conversation to continue.</param>
145 :param callback: The method to call for the resulting bot turn.</param>
146 :param claims_identity:
147 """
148
149 if not reference:
150 raise Exception("ConversationReference is required")
151 if not callback:
152 raise Exception("callback is required")
153
154 request = TurnContext.apply_conversation_reference(
155 conversation_reference_extension.get_continuation_activity(reference),
156 reference,
157 )
158 context = TurnContext(self, request)
159
160 return await self.run_pipeline(context, callback)
161
162 async def process(self, req: Request, logic: Callable) -> Response:
163 """
164 Accept an incoming webhook request and convert it into a TurnContext which can be processed by the bot's logic.
165
166 :param req: The aoihttp Request object
167 :param logic: The method to call for the resulting bot turn.</param>
168 :return: The aoihttp Response
169 """
170 if not req:
171 raise Exception("Request is required")
172
173 if not self.slack_logged_in:
174 await self.slack_client.login_with_slack()
175 self.slack_logged_in = True
176
177 body = await req.text()
178 slack_body = SlackHelper.deserialize_body(req.content_type, body)
179
180 if slack_body.type == "url_verification":
181 return SlackHelper.response(req, 200, slack_body.challenge)
182
183 if not self.slack_client.verify_signature(req, body):
184 text = "Rejected due to mismatched header signature"
185 return SlackHelper.response(req, 401, text)
186
187 if (
188 not self.slack_client.options.slack_verification_token
189 and slack_body.token != self.slack_client.options.slack_verification_token
190 ):
191 text = f"Rejected due to mismatched verificationToken:{body}"
192 return SlackHelper.response(req, 403, text)
193
194 if slack_body.payload:
195 # handle interactive_message callbacks and block_actions
196 activity = SlackHelper.payload_to_activity(slack_body.payload)
197 elif slack_body.type == "event_callback":
198 activity = await SlackHelper.event_to_activity(
199 slack_body.event, self.slack_client
200 )
201 elif slack_body.command:
202 activity = await SlackHelper.command_to_activity(
203 slack_body, self.slack_client
204 )
205 else:
206 raise Exception(f"Unknown Slack event type {slack_body.type}")
207
208 context = TurnContext(self, activity)
209 await self.run_pipeline(context, logic)
210
211 return SlackHelper.response(req, 200)
212
[end of libraries/botbuilder-adapters-slack/botbuilder/adapters/slack/slack_adapter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/libraries/botbuilder-adapters-slack/botbuilder/adapters/slack/slack_adapter.py b/libraries/botbuilder-adapters-slack/botbuilder/adapters/slack/slack_adapter.py
--- a/libraries/botbuilder-adapters-slack/botbuilder/adapters/slack/slack_adapter.py
+++ b/libraries/botbuilder-adapters-slack/botbuilder/adapters/slack/slack_adapter.py
@@ -138,12 +138,11 @@
Sends a proactive message to a conversation. Call this method to proactively send a message to a conversation.
Most _channels require a user to initiate a conversation with a bot before the bot can send activities
to the user.
- :param bot_id: The application ID of the bot. This parameter is ignored in
- single tenant the Adpters (Console, Test, etc) but is critical to the BotFrameworkAdapter
- which is multi-tenant aware. </param>
- :param reference: A reference to the conversation to continue.</param>
- :param callback: The method to call for the resulting bot turn.</param>
- :param claims_identity:
+ :param bot_id: Unused for this override.
+ :param reference: A reference to the conversation to continue.
+ :param callback: The method to call for the resulting bot turn.
+ :param claims_identity: A ClaimsIdentity for the conversation.
+ :param audience: Unused for this override.
"""
if not reference:
@@ -151,11 +150,19 @@
if not callback:
raise Exception("callback is required")
- request = TurnContext.apply_conversation_reference(
- conversation_reference_extension.get_continuation_activity(reference),
- reference,
- )
- context = TurnContext(self, request)
+ if claims_identity:
+ request = conversation_reference_extension.get_continuation_activity(
+ reference
+ )
+ context = TurnContext(self, request)
+ context.turn_state[BotAdapter.BOT_IDENTITY_KEY] = claims_identity
+ context.turn_state[BotAdapter.BOT_CALLBACK_HANDLER_KEY] = callback
+ else:
+ request = TurnContext.apply_conversation_reference(
+ conversation_reference_extension.get_continuation_activity(reference),
+ reference,
+ )
+ context = TurnContext(self, request)
return await self.run_pipeline(context, callback)
| {"golden_diff": "diff --git a/libraries/botbuilder-adapters-slack/botbuilder/adapters/slack/slack_adapter.py b/libraries/botbuilder-adapters-slack/botbuilder/adapters/slack/slack_adapter.py\n--- a/libraries/botbuilder-adapters-slack/botbuilder/adapters/slack/slack_adapter.py\n+++ b/libraries/botbuilder-adapters-slack/botbuilder/adapters/slack/slack_adapter.py\n@@ -138,12 +138,11 @@\n Sends a proactive message to a conversation. Call this method to proactively send a message to a conversation.\r\n Most _channels require a user to initiate a conversation with a bot before the bot can send activities\r\n to the user.\r\n- :param bot_id: The application ID of the bot. This parameter is ignored in\r\n- single tenant the Adpters (Console, Test, etc) but is critical to the BotFrameworkAdapter\r\n- which is multi-tenant aware. </param>\r\n- :param reference: A reference to the conversation to continue.</param>\r\n- :param callback: The method to call for the resulting bot turn.</param>\r\n- :param claims_identity:\r\n+ :param bot_id: Unused for this override.\r\n+ :param reference: A reference to the conversation to continue.\r\n+ :param callback: The method to call for the resulting bot turn.\r\n+ :param claims_identity: A ClaimsIdentity for the conversation.\r\n+ :param audience: Unused for this override.\r\n \"\"\"\r\n \r\n if not reference:\r\n@@ -151,11 +150,19 @@\n if not callback:\r\n raise Exception(\"callback is required\")\r\n \r\n- request = TurnContext.apply_conversation_reference(\r\n- conversation_reference_extension.get_continuation_activity(reference),\r\n- reference,\r\n- )\r\n- context = TurnContext(self, request)\r\n+ if claims_identity:\r\n+ request = conversation_reference_extension.get_continuation_activity(\r\n+ reference\r\n+ )\r\n+ context = TurnContext(self, request)\r\n+ context.turn_state[BotAdapter.BOT_IDENTITY_KEY] = claims_identity\r\n+ context.turn_state[BotAdapter.BOT_CALLBACK_HANDLER_KEY] = callback\r\n+ else:\r\n+ request = TurnContext.apply_conversation_reference(\r\n+ conversation_reference_extension.get_continuation_activity(reference),\r\n+ reference,\r\n+ )\r\n+ context = TurnContext(self, request)\r\n \r\n return await self.run_pipeline(context, callback)\n", "issue": "[PORT] BotAdapter changes (made for custom adapter / skills compat)\n> Port this change from botbuilder-dotnet/master branch:\nhttps://github.com/microsoft/botbuilder-dotnet/pull/3148\n\n#3147 - Added additional ContinueConversationAsync overload for custom adapters for skills compatibility.\r\n#3146 - Move 'BotIdentityKey' const to BotAdapter and made it public.\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\r\n# Licensed under the MIT License.\r\n\r\nfrom abc import ABC\r\nfrom typing import List, Callable, Awaitable\r\n\r\nfrom aiohttp.web_request import Request\r\nfrom aiohttp.web_response import Response\r\nfrom botframework.connector.auth import ClaimsIdentity\r\nfrom botbuilder.core import conversation_reference_extension\r\nfrom botbuilder.core import BotAdapter, TurnContext\r\nfrom botbuilder.schema import (\r\n Activity,\r\n ResourceResponse,\r\n ActivityTypes,\r\n ConversationAccount,\r\n ConversationReference,\r\n)\r\n\r\nfrom .activity_resourceresponse import ActivityResourceResponse\r\nfrom .slack_client import SlackClient\r\nfrom .slack_helper import SlackHelper\r\n\r\n\r\nclass SlackAdapter(BotAdapter, ABC):\r\n \"\"\"\r\n BotAdapter that can handle incoming slack events. 
Incoming slack events are deserialized to an Activity\r\n that is dispatch through the middleware and bot pipeline.\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n client: SlackClient,\r\n on_turn_error: Callable[[TurnContext, Exception], Awaitable] = None,\r\n ):\r\n super().__init__(on_turn_error)\r\n self.slack_client = client\r\n self.slack_logged_in = False\r\n\r\n async def send_activities(\r\n self, context: TurnContext, activities: List[Activity]\r\n ) -> List[ResourceResponse]:\r\n \"\"\"\r\n Standard BotBuilder adapter method to send a message from the bot to the messaging API.\r\n\r\n :param context: A TurnContext representing the current incoming message and environment.\r\n :param activities: An array of outgoing activities to be sent back to the messaging API.\r\n :return: An array of ResourceResponse objects containing the IDs that Slack assigned to the sent messages.\r\n \"\"\"\r\n\r\n if not context:\r\n raise Exception(\"TurnContext is required\")\r\n if not activities:\r\n raise Exception(\"List[Activity] is required\")\r\n\r\n responses = []\r\n\r\n for activity in activities:\r\n if activity.type == ActivityTypes.message:\r\n message = SlackHelper.activity_to_slack(activity)\r\n\r\n slack_response = await self.slack_client.post_message_to_slack(message)\r\n\r\n if slack_response and slack_response.status_code / 100 == 2:\r\n resource_response = ActivityResourceResponse(\r\n id=slack_response.data[\"ts\"],\r\n activity_id=slack_response.data[\"ts\"],\r\n conversation=ConversationAccount(\r\n id=slack_response.data[\"channel\"]\r\n ),\r\n )\r\n\r\n responses.append(resource_response)\r\n\r\n return responses\r\n\r\n async def update_activity(self, context: TurnContext, activity: Activity):\r\n \"\"\"\r\n Standard BotBuilder adapter method to update a previous message with new content.\r\n\r\n :param context: A TurnContext representing the current incoming message and environment.\r\n :param activity: The updated activity in the form '{id: `id of activity to update`, ...}'.\r\n :return: A resource response with the Id of the updated activity.\r\n \"\"\"\r\n\r\n if not context:\r\n raise Exception(\"TurnContext is required\")\r\n if not activity:\r\n raise Exception(\"Activity is required\")\r\n if not activity.id:\r\n raise Exception(\"Activity.id is required\")\r\n if not activity.conversation:\r\n raise Exception(\"Activity.conversation is required\")\r\n\r\n message = SlackHelper.activity_to_slack(activity)\r\n results = await self.slack_client.update(\r\n timestamp=message.ts, channel_id=message.channel, text=message.text,\r\n )\r\n\r\n if results.status_code / 100 != 2:\r\n raise Exception(f\"Error updating activity on slack: {results}\")\r\n\r\n return ResourceResponse(id=activity.id)\r\n\r\n async def delete_activity(\r\n self, context: TurnContext, reference: ConversationReference\r\n ):\r\n \"\"\"\r\n Standard BotBuilder adapter method to delete a previous message.\r\n\r\n :param context: A TurnContext representing the current incoming message and environment.\r\n :param reference: An object in the form \"{activityId: `id of message to delete`,\r\n conversation: { id: `id of slack channel`}}\".\r\n \"\"\"\r\n\r\n if not context:\r\n raise Exception(\"TurnContext is required\")\r\n if not reference:\r\n raise Exception(\"ConversationReference is required\")\r\n if not reference.channel_id:\r\n raise Exception(\"ConversationReference.channel_id is required\")\r\n if not context.activity.timestamp:\r\n raise Exception(\"Activity.timestamp is required\")\r\n\r\n await 
self.slack_client.delete_message(\r\n channel_id=reference.channel_id, timestamp=context.activity.timestamp\r\n )\r\n\r\n async def continue_conversation(\r\n self,\r\n reference: ConversationReference,\r\n callback: Callable,\r\n bot_id: str = None,\r\n claims_identity: ClaimsIdentity = None,\r\n audience: str = None,\r\n ):\r\n \"\"\"\r\n Sends a proactive message to a conversation. Call this method to proactively send a message to a conversation.\r\n Most _channels require a user to initiate a conversation with a bot before the bot can send activities\r\n to the user.\r\n :param bot_id: The application ID of the bot. This parameter is ignored in\r\n single tenant the Adpters (Console, Test, etc) but is critical to the BotFrameworkAdapter\r\n which is multi-tenant aware. </param>\r\n :param reference: A reference to the conversation to continue.</param>\r\n :param callback: The method to call for the resulting bot turn.</param>\r\n :param claims_identity:\r\n \"\"\"\r\n\r\n if not reference:\r\n raise Exception(\"ConversationReference is required\")\r\n if not callback:\r\n raise Exception(\"callback is required\")\r\n\r\n request = TurnContext.apply_conversation_reference(\r\n conversation_reference_extension.get_continuation_activity(reference),\r\n reference,\r\n )\r\n context = TurnContext(self, request)\r\n\r\n return await self.run_pipeline(context, callback)\r\n\r\n async def process(self, req: Request, logic: Callable) -> Response:\r\n \"\"\"\r\n Accept an incoming webhook request and convert it into a TurnContext which can be processed by the bot's logic.\r\n\r\n :param req: The aoihttp Request object\r\n :param logic: The method to call for the resulting bot turn.</param>\r\n :return: The aoihttp Response\r\n \"\"\"\r\n if not req:\r\n raise Exception(\"Request is required\")\r\n\r\n if not self.slack_logged_in:\r\n await self.slack_client.login_with_slack()\r\n self.slack_logged_in = True\r\n\r\n body = await req.text()\r\n slack_body = SlackHelper.deserialize_body(req.content_type, body)\r\n\r\n if slack_body.type == \"url_verification\":\r\n return SlackHelper.response(req, 200, slack_body.challenge)\r\n\r\n if not self.slack_client.verify_signature(req, body):\r\n text = \"Rejected due to mismatched header signature\"\r\n return SlackHelper.response(req, 401, text)\r\n\r\n if (\r\n not self.slack_client.options.slack_verification_token\r\n and slack_body.token != self.slack_client.options.slack_verification_token\r\n ):\r\n text = f\"Rejected due to mismatched verificationToken:{body}\"\r\n return SlackHelper.response(req, 403, text)\r\n\r\n if slack_body.payload:\r\n # handle interactive_message callbacks and block_actions\r\n activity = SlackHelper.payload_to_activity(slack_body.payload)\r\n elif slack_body.type == \"event_callback\":\r\n activity = await SlackHelper.event_to_activity(\r\n slack_body.event, self.slack_client\r\n )\r\n elif slack_body.command:\r\n activity = await SlackHelper.command_to_activity(\r\n slack_body, self.slack_client\r\n )\r\n else:\r\n raise Exception(f\"Unknown Slack event type {slack_body.type}\")\r\n\r\n context = TurnContext(self, activity)\r\n await self.run_pipeline(context, logic)\r\n\r\n return SlackHelper.response(req, 200)\r\n", "path": "libraries/botbuilder-adapters-slack/botbuilder/adapters/slack/slack_adapter.py"}]} | 2,786 | 521 |
gh_patches_debug_31007 | rasdani/github-patches | git_diff | Flexget__Flexget-2274 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Torrentday Seeders and Leechers Causes an Error.
File "/usr/local/flexget/env/lib/python2.7/site-packages/flexget/plugins/sites/torrentday.py", line 174, in search
entry['torrent_seeds'] = int(seeders.contents[0].replace(',', ''))
File "/usr/local/flexget/env/lib/python2.7/site-packages/bs4/element.py", line 1807, in __getattr__
"ResultSet object has no attribute '%s'. You're probably treating a list of items like a single item. Did you call find_all() when you meant to call find()?" % key
AttributeError: ResultSet object has no attribute 'contents'. You're probably treating a list of items like a single item. Did you call find_all() when you meant to call find()?
2018-12-08 16:01 WARNING task AVI2MKV Aborting task (plugin: discover)
2018-12-08 16:01 DEBUG task_queue task AVI2MKV aborted: TaskAbort(reason=BUG: Unhandled error in plugin discover: ResultSet object has no attribute 'contents'. You're probably treating a list of items like a single item. Did you call find_all() when you meant to call find()?, silent=False)
</issue>
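The traceback comes down to the `find_all` / `find` distinction in BeautifulSoup: `find_all` returns a list-like `ResultSet`, which has no `.contents`, while `find` returns a single tag that does. A standalone sketch with a made-up table row (the class names mirror the torrentday markup in the plugin below):

```python
from bs4 import BeautifulSoup

html = '<tr><td class="ac seedersInfo">1,234</td></tr>'  # invented sample row
soup = BeautifulSoup(html, "html.parser")

cells = soup.find_all("td", {"class": "ac seedersInfo"})
print(type(cells).__name__)  # ResultSet -- a list of matches, no .contents attribute
# cells.contents             # would raise the AttributeError shown in the log above

cell = soup.find("td", {"class": "ac seedersInfo"})
print(cell.contents[0])                        # 1,234
print(int(cell.contents[0].replace(",", "")))  # 1234 -- the value the plugin wants
```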
<code>
[start of flexget/plugins/sites/torrentday.py]
1 from __future__ import unicode_literals, division, absolute_import
2 from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
3 from future.moves.urllib.parse import quote
4
5 import re
6 import logging
7
8 from requests.exceptions import RequestException
9
10 from flexget import plugin
11 from flexget.config_schema import one_or_more
12 from flexget.entry import Entry
13 from flexget.event import event
14 from flexget.plugin import PluginError
15 from flexget.plugins.internal.urlrewriting import UrlRewritingError
16 from flexget.utils import requests
17 from flexget.utils.soup import get_soup
18 from flexget.utils.search import torrent_availability, normalize_unicode
19 from flexget.utils.tools import parse_filesize
20
21 log = logging.getLogger('torrentday')
22
23 CATEGORIES = {
24 'all': 0,
25 # Movies
26 'mov480p': 25,
27 'movHD': 11,
28 'movBD': 5,
29 'movDVD': 3,
30 'movMP4': 21,
31 'movNonEnglish': 22,
32 'movPACKS': 13,
33 'movSDx264': 44,
34 'movX265': 48,
35 'movXVID': 1,
36
37 # TV
38 'tv480p': 24,
39 'tvBRD': 32,
40 'tvDVD': 31,
41 'tvDVDrip': 33,
42 'tvMOBILE': 46,
43 'tvPACKS': 14,
44 'tvSDx264': 26,
45 'tvHDx264': 7,
46 'tvX265': 34,
47 'tvXVID': 2
48 }
49
50
51 class UrlRewriteTorrentday(object):
52 """
53 Torrentday urlrewriter and search plugin.
54
55 torrentday:
56 uid: xxxxxxxxxxxxx (required) NOT YOUR LOGIN. find this in your browser's cookies
57 passkey: xxxxxxxxx (required) NOT YOUR PASSWORD. see previous
58 cfduid: xxxxxxxxxx (required) AGAIN IN THE COOKIES
59 rss_key: xxxxxxxxx (required) get this from your profile page
60 category: xxxxxxxx
61
62 Category can be one of
63 ID from browsing site OR 'name'
64 movies:
65 mov480p, movHD, movBD, movDVD,
66 movMP4, movNonEnglish, movPACKS,
67 movSDx264, movX265, movXVID
68 tv:
69 tv480p, tvBRD, tvDVD, tvDVDrip,
70 tvMOBILE, tvPACKS, tvSDx264,
71 tvHDx264, tvX265, tvXVID
72 """
73
74 schema = {
75 'type': 'object',
76 'properties': {
77 'rss_key': {'type': 'string'},
78 'uid': {'type': 'string'},
79 'passkey': {'type': 'string'},
80 'cfduid': {'type': 'string'},
81 'category': {
82 'oneOf': [
83 {'type': 'integer'},
84 {'type': 'string', 'enum': list(CATEGORIES)},
85 ]
86 },
87 },
88 'required': ['rss_key', 'uid', 'passkey', 'cfduid'],
89 'additionalProperties': False
90 }
91
92 # urlrewriter API
93 def url_rewritable(self, task, entry):
94 url = entry['url']
95 if url.find('.torrent'):
96 return False
97 if url.startswith('https://www.torrentday.com'):
98 return True
99 return False
100
101 # urlrewriter API
102 def url_rewrite(self, task, entry):
103 if 'url' not in entry:
104 log.error('Didn\'t actually get a URL...')
105 else:
106 log.debug('Got the URL: %s', entry['url'])
107 if entry['url'].startswith('https://www.torrentday.com/browse'):
108 # use search
109 results = self.search(task, entry)
110 if not results:
111 raise UrlRewritingError('No search results found')
112 entry['url'] = results[0]['url']
113
114 @plugin.internet(log)
115 def search(self, task, entry, config=None):
116 """
117 Search for name from torrentday.
118 """
119
120 categories = config.get('category', 'all')
121 # Make sure categories is a list
122 if not isinstance(categories, list):
123 categories = [categories]
124 # If there are any text categories, turn them into their id number
125 categories = [c if isinstance(c, int) else CATEGORIES[c] for c in categories]
126 params = { 'cata': 'yes', 'c{}'.format(','.join(str(c) for c in categories)): 1, 'clear-new': 1}
127 entries = set()
128 for search_string in entry.get('search_strings', [entry['title']]):
129
130 url = 'https://www.torrentday.com/t'
131 params['q'] = normalize_unicode(search_string).replace(':', '')
132 cookies = { 'uid': config['uid'], 'pass': config['passkey'], '__cfduid': config['cfduid'] }
133
134 try:
135 page = requests.get(url, params=params, cookies=cookies).content
136 except RequestException as e:
137 raise PluginError('Could not connect to torrentday: {}'.format(e))
138
139 soup = get_soup(page)
140 # the first row is the header so skip it
141 for tr in soup.find_all('tr')[1:]:
142 entry = Entry()
143 # find the torrent names
144 td = tr.find('td', { 'class': 'torrentNameInfo' })
145 if not td:
146 log.warning('Could not find entry torrentNameInfo for %s.', search_string)
147 continue
148 title = td.find('a')
149 if not title:
150 log.warning('Could not determine title for %s.', search_string)
151 continue
152 entry['title'] = title.contents[0]
153 log.debug('title: %s', title.contents[0])
154
155 # find download link
156 torrent_url = tr.find('td', { 'class': 'ac' })
157 if not torrent_url:
158 log.warning('Could not determine download link for %s.', search_string)
159 continue
160 torrent_url = torrent_url.find('a').get('href')
161
162 # construct download URL
163 torrent_url = ( 'https://www.torrentday.com/' + torrent_url + '?torrent_pass=' + config['rss_key'] )
164 log.debug('RSS-ified download link: %s', torrent_url)
165 entry['url'] = torrent_url
166
167 # us tr object for seeders/leechers
168 seeders = tr.find_all('td', { 'class': 'ac seedersInfo'})
169 leechers = tr.find_all('td', { 'class': 'ac leechersInfo'})
170 entry['torrent_seeds'] = int(seeders.contents[0].replace(',', ''))
171 entry['torrent_leeches'] = int(leechers.contents[0].replace(',', ''))
172 entry['search_sort'] = torrent_availability(entry['torrent_seeds'], entry['torrent_leeches'])
173
174 # use tr object for size
175 size = tr.find('td', text=re.compile('([\.\d]+) ([TGMKk]?)B')).contents[0]
176 size = re.search('([\.\d]+) ([TGMKk]?)B', str(size))
177
178 entry['content_size'] = parse_filesize(size.group(0))
179
180 entries.add(entry)
181
182 return sorted(entries, reverse=True, key=lambda x: x.get('search_sort'))
183
184
185 @event('plugin.register')
186 def register_plugin():
187 plugin.register(UrlRewriteTorrentday, 'torrentday', interfaces=['urlrewriter', 'search'], api_ver=2)
188
[end of flexget/plugins/sites/torrentday.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/flexget/plugins/sites/torrentday.py b/flexget/plugins/sites/torrentday.py
--- a/flexget/plugins/sites/torrentday.py
+++ b/flexget/plugins/sites/torrentday.py
@@ -136,9 +136,15 @@
except RequestException as e:
raise PluginError('Could not connect to torrentday: {}'.format(e))
- soup = get_soup(page)
+ # the following should avoid table being None due to a malformed
+ # html in td search results
+ soup = get_soup(page).contents[1].contents[1].next.next.nextSibling
+ table = soup.find('table', {'id': 'torrentTable'})
+ if (table is None):
+ raise PluginError('Search returned by torrentday appears to be empty or malformed.')
+
# the first row is the header so skip it
- for tr in soup.find_all('tr')[1:]:
+ for tr in table.find_all('tr')[1:]:
entry = Entry()
# find the torrent names
td = tr.find('td', { 'class': 'torrentNameInfo' })
@@ -165,8 +171,8 @@
entry['url'] = torrent_url
# us tr object for seeders/leechers
- seeders = tr.find_all('td', { 'class': 'ac seedersInfo'})
- leechers = tr.find_all('td', { 'class': 'ac leechersInfo'})
+ seeders = tr.find('td', { 'class': 'ac seedersInfo'})
+ leechers = tr.find('td', { 'class': 'ac leechersInfo'})
entry['torrent_seeds'] = int(seeders.contents[0].replace(',', ''))
entry['torrent_leeches'] = int(leechers.contents[0].replace(',', ''))
entry['search_sort'] = torrent_availability(entry['torrent_seeds'], entry['torrent_leeches'])
| {"golden_diff": "diff --git a/flexget/plugins/sites/torrentday.py b/flexget/plugins/sites/torrentday.py\n--- a/flexget/plugins/sites/torrentday.py\n+++ b/flexget/plugins/sites/torrentday.py\n@@ -136,9 +136,15 @@\n except RequestException as e:\n raise PluginError('Could not connect to torrentday: {}'.format(e))\n \n- soup = get_soup(page)\n+ # the following should avoid table being None due to a malformed\n+ # html in td search results\n+ soup = get_soup(page).contents[1].contents[1].next.next.nextSibling\n+ table = soup.find('table', {'id': 'torrentTable'})\n+ if (table is None):\n+ raise PluginError('Search returned by torrentday appears to be empty or malformed.')\n+\n # the first row is the header so skip it\n- for tr in soup.find_all('tr')[1:]:\n+ for tr in table.find_all('tr')[1:]:\n entry = Entry()\n # find the torrent names\n td = tr.find('td', { 'class': 'torrentNameInfo' })\n@@ -165,8 +171,8 @@\n entry['url'] = torrent_url\n \n # us tr object for seeders/leechers\n- seeders = tr.find_all('td', { 'class': 'ac seedersInfo'})\n- leechers = tr.find_all('td', { 'class': 'ac leechersInfo'})\n+ seeders = tr.find('td', { 'class': 'ac seedersInfo'})\n+ leechers = tr.find('td', { 'class': 'ac leechersInfo'})\n entry['torrent_seeds'] = int(seeders.contents[0].replace(',', ''))\n entry['torrent_leeches'] = int(leechers.contents[0].replace(',', ''))\n entry['search_sort'] = torrent_availability(entry['torrent_seeds'], entry['torrent_leeches'])\n", "issue": "Torrentday Seeders and Leechers Causes an Error.\n File \"/usr/local/flexget/env/lib/python2.7/site-packages/flexget/plugins/sites/torrentday.py\", line 174, in search\r\n entry['torrent_seeds'] = int(seeders.contents[0].replace(',', ''))\r\n File \"/usr/local/flexget/env/lib/python2.7/site-packages/bs4/element.py\", line 1807, in __getattr__\r\n \"ResultSet object has no attribute '%s'. You're probably treating a list of items like a single item. Did you call find_all() when you meant to call find()?\" % key\r\nAttributeError: ResultSet object has no attribute 'contents'. You're probably treating a list of items like a single item. Did you call find_all() when you meant to call find()?\r\n2018-12-08 16:01 WARNING task AVI2MKV Aborting task (plugin: discover)\r\n2018-12-08 16:01 DEBUG task_queue task AVI2MKV aborted: TaskAbort(reason=BUG: Unhandled error in plugin discover: ResultSet object has no attribute 'contents'. You're probably treating a list of items like a single item. 
Did you call find_all() when you meant to call find()?, silent=False)\r\n\n", "before_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # noqa pylint: disable=unused-import, redefined-builtin\nfrom future.moves.urllib.parse import quote\n\nimport re\nimport logging\n\nfrom requests.exceptions import RequestException\n\nfrom flexget import plugin\nfrom flexget.config_schema import one_or_more\nfrom flexget.entry import Entry\nfrom flexget.event import event\nfrom flexget.plugin import PluginError\nfrom flexget.plugins.internal.urlrewriting import UrlRewritingError\nfrom flexget.utils import requests\nfrom flexget.utils.soup import get_soup\nfrom flexget.utils.search import torrent_availability, normalize_unicode\nfrom flexget.utils.tools import parse_filesize\n\nlog = logging.getLogger('torrentday')\n\nCATEGORIES = {\n 'all': 0,\n # Movies\n 'mov480p': 25,\n 'movHD': 11,\n 'movBD': 5,\n 'movDVD': 3,\n 'movMP4': 21,\n 'movNonEnglish': 22,\n 'movPACKS': 13,\n 'movSDx264': 44,\n 'movX265': 48,\n 'movXVID': 1,\n\n # TV\n 'tv480p': 24,\n 'tvBRD': 32,\n 'tvDVD': 31,\n 'tvDVDrip': 33,\n 'tvMOBILE': 46,\n 'tvPACKS': 14,\n 'tvSDx264': 26,\n 'tvHDx264': 7,\n 'tvX265': 34,\n 'tvXVID': 2\n}\n\n\nclass UrlRewriteTorrentday(object):\n \"\"\"\n Torrentday urlrewriter and search plugin.\n\n torrentday:\n uid: xxxxxxxxxxxxx (required) NOT YOUR LOGIN. find this in your browser's cookies\n passkey: xxxxxxxxx (required) NOT YOUR PASSWORD. see previous\n cfduid: xxxxxxxxxx (required) AGAIN IN THE COOKIES\n rss_key: xxxxxxxxx (required) get this from your profile page\n category: xxxxxxxx\n\n Category can be one of \n ID from browsing site OR 'name'\n movies:\n mov480p, movHD, movBD, movDVD,\n movMP4, movNonEnglish, movPACKS,\n movSDx264, movX265, movXVID\n tv:\n tv480p, tvBRD, tvDVD, tvDVDrip,\n tvMOBILE, tvPACKS, tvSDx264, \n tvHDx264, tvX265, tvXVID\n \"\"\"\n\n schema = {\n 'type': 'object',\n 'properties': {\n 'rss_key': {'type': 'string'},\n 'uid': {'type': 'string'},\n 'passkey': {'type': 'string'},\n 'cfduid': {'type': 'string'},\n 'category': {\n 'oneOf': [\n {'type': 'integer'},\n {'type': 'string', 'enum': list(CATEGORIES)},\n ]\n },\n },\n 'required': ['rss_key', 'uid', 'passkey', 'cfduid'],\n 'additionalProperties': False\n }\n\n # urlrewriter API\n def url_rewritable(self, task, entry):\n url = entry['url']\n if url.find('.torrent'):\n return False\n if url.startswith('https://www.torrentday.com'):\n return True\n return False\n\n # urlrewriter API\n def url_rewrite(self, task, entry):\n if 'url' not in entry:\n log.error('Didn\\'t actually get a URL...')\n else:\n log.debug('Got the URL: %s', entry['url'])\n if entry['url'].startswith('https://www.torrentday.com/browse'):\n # use search\n results = self.search(task, entry)\n if not results:\n raise UrlRewritingError('No search results found')\n entry['url'] = results[0]['url']\n\n @plugin.internet(log)\n def search(self, task, entry, config=None):\n \"\"\"\n Search for name from torrentday.\n \"\"\"\n\n categories = config.get('category', 'all')\n # Make sure categories is a list\n if not isinstance(categories, list):\n categories = [categories]\n # If there are any text categories, turn them into their id number\n categories = [c if isinstance(c, int) else CATEGORIES[c] for c in categories]\n params = { 'cata': 'yes', 'c{}'.format(','.join(str(c) for c in categories)): 1, 'clear-new': 1}\n entries = set()\n for search_string in entry.get('search_strings', [entry['title']]):\n\n url = 
'https://www.torrentday.com/t'\n params['q'] = normalize_unicode(search_string).replace(':', '')\n cookies = { 'uid': config['uid'], 'pass': config['passkey'], '__cfduid': config['cfduid'] }\n\n try:\n page = requests.get(url, params=params, cookies=cookies).content\n except RequestException as e:\n raise PluginError('Could not connect to torrentday: {}'.format(e))\n\n soup = get_soup(page)\n # the first row is the header so skip it\n for tr in soup.find_all('tr')[1:]:\n entry = Entry()\n # find the torrent names\n td = tr.find('td', { 'class': 'torrentNameInfo' })\n if not td:\n log.warning('Could not find entry torrentNameInfo for %s.', search_string)\n continue\n title = td.find('a')\n if not title:\n log.warning('Could not determine title for %s.', search_string)\n continue\n entry['title'] = title.contents[0]\n log.debug('title: %s', title.contents[0])\n\n # find download link\n torrent_url = tr.find('td', { 'class': 'ac' })\n if not torrent_url:\n log.warning('Could not determine download link for %s.', search_string)\n continue\n torrent_url = torrent_url.find('a').get('href')\n\n # construct download URL\n torrent_url = ( 'https://www.torrentday.com/' + torrent_url + '?torrent_pass=' + config['rss_key'] )\n log.debug('RSS-ified download link: %s', torrent_url)\n entry['url'] = torrent_url\n\n # us tr object for seeders/leechers\n seeders = tr.find_all('td', { 'class': 'ac seedersInfo'})\n leechers = tr.find_all('td', { 'class': 'ac leechersInfo'})\n entry['torrent_seeds'] = int(seeders.contents[0].replace(',', ''))\n entry['torrent_leeches'] = int(leechers.contents[0].replace(',', ''))\n entry['search_sort'] = torrent_availability(entry['torrent_seeds'], entry['torrent_leeches'])\n\n # use tr object for size\n size = tr.find('td', text=re.compile('([\\.\\d]+) ([TGMKk]?)B')).contents[0]\n size = re.search('([\\.\\d]+) ([TGMKk]?)B', str(size))\n\n entry['content_size'] = parse_filesize(size.group(0))\n\n entries.add(entry)\n\n return sorted(entries, reverse=True, key=lambda x: x.get('search_sort'))\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(UrlRewriteTorrentday, 'torrentday', interfaces=['urlrewriter', 'search'], api_ver=2)\n", "path": "flexget/plugins/sites/torrentday.py"}]} | 3,017 | 435 |
gh_patches_debug_527 | rasdani/github-patches | git_diff | mlflow__mlflow-351 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
UUID dependency breaks python 3 under AWS linux
### System information
- **Have I written custom code (as opposed to using a stock example script provided in MLflow)**: No
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Amazon linux deep learning AMI 12.0 (like CentOS)
- **MLflow installed from (source or binary)**: source (PyPI)
- **MLflow version (run ``mlflow --version``)**: mlflow, version 0.5.0
- **Python version**: Python 3.6.6
- **npm version (if running the dev UI): N/A
- **Exact command to reproduce**: python -c "import mlflow"
### Describe the problem
```pip install mlflow``` also installs uuid==1.30 (which breaks under python3)
The default "uuid" library is included in the python standard library. On the AWS instance, the installed version shadows the default, and includes syntax which is only valid in python2.
On the computer I'm connecting to the instance from, the same script does not produce any errors, but ```uuid.__file__``` points to a standard library version and not the packaged 1.30
### Source code / logs
Full reproduction from a newly created instance:
```
source activate tensorflow_p36
virtualenv env --system-site-packages --python=$(which python) env
source env/bin/activate
pip install mlflow
python -c "import mlflow"
```
```
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/home/ec2-user/scratch/env/lib/python3.6/site-packages/mlflow/__init__.py", line 33, in <module>
import mlflow.projects as projects # noqa
File "/home/ec2-user/scratch/env/lib/python3.6/site-packages/mlflow/projects/__init__.py", line 17, in <module>
import mlflow.tracking as tracking
File "/home/ec2-user/scratch/env/lib/python3.6/site-packages/mlflow/tracking/__init__.py", line 7, in <module>
from mlflow.tracking.service import MLflowService, get_service
File "/home/ec2-user/scratch/env/lib/python3.6/site-packages/mlflow/tracking/service.py", line 13, in <module>
from mlflow.tracking.utils import _get_store
File "/home/ec2-user/scratch/env/lib/python3.6/site-packages/mlflow/tracking/utils.py", line 8, in <module>
from mlflow.store.file_store import FileStore
File "/home/ec2-user/scratch/env/lib/python3.6/site-packages/mlflow/store/file_store.py", line 3, in <module>
import uuid
File "/home/ec2-user/scratch/env/lib/python3.6/site-packages/uuid.py", line 138
if not 0 <= time_low < 1<<32L:
^
SyntaxError: invalid syntax
```
</issue>
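A quick check for which `uuid` module a given interpreter would import, without triggering the Python-2-only syntax error; the paths in the comments are typical locations, not output captured from the AMI:

```python
import importlib.util

spec = importlib.util.find_spec("uuid")  # locates the module without executing it
print(spec.origin)
# standard library (healthy):  .../lib/python3.6/uuid.py
# PyPI uuid==1.30 (broken):    .../site-packages/uuid.py
#
# Remedy mirrored in the diff below: drop the redundant 'uuid' requirement from setup.py
# and `pip uninstall uuid` in already-affected environments; uuid ships with Python itself.
```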
<code>
[start of setup.py]
1 import imp
2 import os
3 from setuptools import setup, find_packages
4
5 version = imp.load_source(
6 'mlflow.version', os.path.join('mlflow', 'version.py')).VERSION
7
8
9 # Get a list of all files in the JS directory to include in our module
10 def package_files(directory):
11 paths = []
12 for (path, directories, filenames) in os.walk(directory):
13 for filename in filenames:
14 paths.append(os.path.join('..', path, filename))
15 return paths
16
17
18 # Prints out a set of paths (relative to the mlflow/ directory) of files in mlflow/server/js/build
19 # to include in the wheel, e.g. "../mlflow/server/js/build/index.html"
20 js_files = package_files('mlflow/server/js/build')
21 sagmaker_server_files = package_files("mlflow/sagemaker/container")
22
23 setup(
24 name='mlflow',
25 version=version,
26 packages=find_packages(exclude=['tests', 'tests.*']),
27 package_data={"mlflow": js_files + sagmaker_server_files},
28 install_requires=[
29 'awscli',
30 'click>=6.7',
31 'databricks-cli>=0.8.0',
32 'requests>=2.17.3',
33 'six>=1.10.0',
34 'uuid',
35 'gunicorn',
36 'Flask',
37 'numpy',
38 'pandas',
39 'scipy',
40 'scikit-learn',
41 'python-dateutil',
42 'protobuf>=3.6.0',
43 'gitpython>=2.1.0',
44 'pyyaml',
45 'boto3',
46 'querystring_parser',
47 'simplejson',
48 ],
49 entry_points='''
50 [console_scripts]
51 mlflow=mlflow.cli:cli
52 ''',
53 zip_safe=False,
54 author='Databricks',
55 description='MLflow: An ML Workflow Tool',
56 long_description=open('README.rst').read(),
57 license='Apache License 2.0',
58 classifiers=[
59 'Intended Audience :: Developers',
60 'Programming Language :: Python :: 2.7',
61 'Programming Language :: Python :: 3.6',
62 ],
63 keywords='ml ai databricks',
64 url='https://mlflow.org/'
65 )
66
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -31,7 +31,6 @@
'databricks-cli>=0.8.0',
'requests>=2.17.3',
'six>=1.10.0',
- 'uuid',
'gunicorn',
'Flask',
'numpy',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -31,7 +31,6 @@\n 'databricks-cli>=0.8.0',\n 'requests>=2.17.3',\n 'six>=1.10.0',\n- 'uuid',\n 'gunicorn',\n 'Flask',\n 'numpy',\n", "issue": "UUID dependency breaks python 3 under AWS linux\n### System information\r\n- **Have I written custom code (as opposed to using a stock example script provided in MLflow)**: No\r\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Amazon linux deep learning AMI 12.0 (like CentOS)\r\n- **MLflow installed from (source or binary)**: source (PyPI)\r\n- **MLflow version (run ``mlflow --version``)**: mlflow, version 0.5.0\r\n- **Python version**: Python 3.6.6\r\n- **npm version (if running the dev UI): N/A\r\n- **Exact command to reproduce**: python -c \"import mlflow\"\r\n\r\n### Describe the problem\r\n```pip install mlflow``` also installs uuid==1.30 (which breaks under python3)\r\n\r\nThe default \"uuid\" library is included in the python standard library. On the AWS instance, the installed version shadows the default, and includes syntax which is only valid in python2. \r\nOn the computer I'm connecting to the instance from, the same script does not produce any errors, but ```uuid.__file__``` points to a standard library version and not the packaged 1.30\r\n\r\n### Source code / logs\r\nFull reproduction from a newly created instance:\r\n```\r\nsource activate tensorflow_p36\r\nvirtualenv env --system-site-packages --python=$(which python) env\r\nsource env/bin/activate\r\npip install mlflow\r\npython -c \"import mlflow\"\r\n```\r\n```\r\nTraceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\n File \"/home/ec2-user/scratch/env/lib/python3.6/site-packages/mlflow/__init__.py\", line 33, in <module>\r\n import mlflow.projects as projects # noqa\r\n File \"/home/ec2-user/scratch/env/lib/python3.6/site-packages/mlflow/projects/__init__.py\", line 17, in <module>\r\n import mlflow.tracking as tracking\r\n File \"/home/ec2-user/scratch/env/lib/python3.6/site-packages/mlflow/tracking/__init__.py\", line 7, in <module>\r\n from mlflow.tracking.service import MLflowService, get_service\r\n File \"/home/ec2-user/scratch/env/lib/python3.6/site-packages/mlflow/tracking/service.py\", line 13, in <module>\r\n from mlflow.tracking.utils import _get_store\r\n File \"/home/ec2-user/scratch/env/lib/python3.6/site-packages/mlflow/tracking/utils.py\", line 8, in <module>\r\n from mlflow.store.file_store import FileStore\r\n File \"/home/ec2-user/scratch/env/lib/python3.6/site-packages/mlflow/store/file_store.py\", line 3, in <module>\r\n import uuid\r\n File \"/home/ec2-user/scratch/env/lib/python3.6/site-packages/uuid.py\", line 138\r\n if not 0 <= time_low < 1<<32L:\r\n ^\r\nSyntaxError: invalid syntax\r\n```\n", "before_files": [{"content": "import imp\nimport os\nfrom setuptools import setup, find_packages\n\nversion = imp.load_source(\n 'mlflow.version', os.path.join('mlflow', 'version.py')).VERSION\n\n\n# Get a list of all files in the JS directory to include in our module\ndef package_files(directory):\n paths = []\n for (path, directories, filenames) in os.walk(directory):\n for filename in filenames:\n paths.append(os.path.join('..', path, filename))\n return paths\n\n\n# Prints out a set of paths (relative to the mlflow/ directory) of files in mlflow/server/js/build\n# to include in the wheel, e.g. 
\"../mlflow/server/js/build/index.html\"\njs_files = package_files('mlflow/server/js/build')\nsagmaker_server_files = package_files(\"mlflow/sagemaker/container\")\n\nsetup(\n name='mlflow',\n version=version,\n packages=find_packages(exclude=['tests', 'tests.*']),\n package_data={\"mlflow\": js_files + sagmaker_server_files},\n install_requires=[\n 'awscli',\n 'click>=6.7',\n 'databricks-cli>=0.8.0',\n 'requests>=2.17.3',\n 'six>=1.10.0',\n 'uuid',\n 'gunicorn',\n 'Flask',\n 'numpy',\n 'pandas',\n 'scipy',\n 'scikit-learn',\n 'python-dateutil',\n 'protobuf>=3.6.0',\n 'gitpython>=2.1.0',\n 'pyyaml',\n 'boto3',\n 'querystring_parser',\n 'simplejson',\n ],\n entry_points='''\n [console_scripts]\n mlflow=mlflow.cli:cli\n ''',\n zip_safe=False,\n author='Databricks',\n description='MLflow: An ML Workflow Tool',\n long_description=open('README.rst').read(),\n license='Apache License 2.0',\n classifiers=[\n 'Intended Audience :: Developers',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.6',\n ],\n keywords='ml ai databricks',\n url='https://mlflow.org/'\n)\n", "path": "setup.py"}]} | 1,793 | 86 |
gh_patches_debug_29665 | rasdani/github-patches | git_diff | digitalfabrik__integreat-cms-183 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Provide Test Data
For developing and testing purposes, we need a fast way to import valid test data into the database. I propose using fixtures or a suitable library for this.
</issue>
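One concrete shape the proposal could take, using the `FIXTURE_DIRS` entry already present in `backend/backend/settings.py` below; the fixture file name, the standalone-script framing, and running it from the `backend/` directory are illustrative assumptions:

```python
import os

import django

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "backend.settings")
django.setup()

from django.core.management import call_command

# Snapshot the current contents of the cms app into the configured fixtures directory ...
call_command("dumpdata", "cms", indent=2, output="cms/fixtures/test_data.json")

# ... and replay it into any fresh database (new developer machine, CI run, demo instance).
call_command("loaddata", "test_data.json")
```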
<code>
[start of setup.py]
1 #!/usr/bin/env python3
2
3 import os
4 from setuptools import find_packages, setup
5
6 setup(
7 name="integreat_cms",
8 version="0.0.13",
9 packages=find_packages("backend"),
10 package_dir={'':'backend'},
11 include_package_data=True,
12 scripts=['backend/integreat-cms'],
13 data_files= [("lib/integreat-{}".format(root), [os.path.join(root, f) for f in files])
14 for root, dirs, files in os.walk('backend/cms/templates/')] +
15 [("lib/integreat-{}".format(root), [os.path.join(root, f) for f in files])
16 for root, dirs, files in os.walk('backend/cms/static/')] +
17 [('usr/lib/systemd/system/', ['systemd/[email protected]'])],
18 install_requires=[
19 "beautifulsoup4==4.8.0",
20 "cffi==1.12.3",
21 "Django==1.11.20",
22 "django-filer==1.5.0",
23 "django-mptt==0.9.1",
24 "django-widget-tweaks==1.4.3",
25 "djangorestframework==3.9.4",
26 "drf-yasg==1.16.1",
27 "idna==2.6",
28 "lxml==4.3.3",
29 "packaging==19.0",
30 "psycopg2-binary==2.8.3",
31 "pylint==2.3.1",
32 "pylint-django==2.0.11",
33 "pylint_runner==0.5.4",
34 "python-dateutil==2.8.0",
35 "requests==2.22.0",
36 "rules==2.0.1",
37 "six==1.11.0",
38 ],
39 author="Integreat App Project",
40 author_email="[email protected]",
41 description="Content Management System for the Integreat App",
42 license="GPL-2.0-or-later",
43 keywords="Django Integreat CMS",
44 url="http://github.com/Integreat/",
45 classifiers=[
46 'Development Status :: 5 - Production/Stable',
47 'Intended Audience :: Developers',
48 'Programming Language :: Python :: 3.4',
49 'Programming Language :: Python :: 3.5',
50 'Programming Language :: Python :: 3.6',
51 ]
52 )
53
[end of setup.py]
[start of backend/backend/docker_settings.py]
1 # pylint: disable=wildcard-import
2 # pylint: disable=unused-wildcard-import
3 from .settings import *
4
5 # Database
6 # https://docs.djangoproject.com/en/1.11/ref/settings/#databases
7
8 DATABASE_DICT = {
9 'default': {
10 'ENGINE': 'django.db.backends.postgresql_psycopg2',
11 'NAME': 'integreat',
12 'USER': 'integreat',
13 'PASSWORD': 'password',
14 'HOST': 'localhost',
15 'PORT': '5432',
16 }
17 }
18
[end of backend/backend/docker_settings.py]
[start of backend/backend/settings.py]
1 """
2 Django settings for backend project.
3
4 Generated by 'django-admin startproject' using Django 1.11.11.
5
6 For more information on this file, see
7 https://docs.djangoproject.com/en/1.11/topics/settings/
8
9 For the full list of settings and their values, see
10 https://docs.djangoproject.com/en/1.11/ref/settings/
11 """
12
13 import os
14
15 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
16 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
17
18
19 # Quick-start development settings - unsuitable for production
20 # See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
21
22 # SECURITY WARNING: keep the secret key used in production secret!
23 SECRET_KEY = '-!v282$zj815_q@htaxcubylo)(l%a+k*-xi78hw*#s2@i86@_'
24
25 # SECURITY WARNING: don't run with debug turned on in production!
26 DEBUG = True
27
28 ALLOWED_HOSTS = [
29 'localhost',
30 '127.0.0.1',
31 '0.0.0.0'
32 ]
33
34
35 # Application definition
36
37 INSTALLED_APPS = [
38 'cms.apps.CmsConfig',
39 'django.contrib.admin',
40 'django.contrib.auth',
41 'django.contrib.contenttypes',
42 'django.contrib.messages',
43 'django.contrib.sessions',
44 'django.contrib.staticfiles',
45 'widget_tweaks',
46 'easy_thumbnails',
47 'filer',
48 'drf_yasg',
49 'mptt',
50 'rest_framework',
51 'rules.apps.AutodiscoverRulesConfig',
52 ]
53
54 MIDDLEWARE = [
55 'django.middleware.security.SecurityMiddleware',
56 'django.contrib.sessions.middleware.SessionMiddleware',
57 'django.middleware.locale.LocaleMiddleware',
58 'django.middleware.common.CommonMiddleware',
59 'django.middleware.csrf.CsrfViewMiddleware',
60 'django.contrib.auth.middleware.AuthenticationMiddleware',
61 'django.contrib.messages.middleware.MessageMiddleware',
62 'django.middleware.clickjacking.XFrameOptionsMiddleware',
63 ]
64
65 ROOT_URLCONF = 'backend.urls'
66 THUMBNAIL_HIGH_RESOLUTION = True
67
68 TEMPLATES = [
69 {
70 'BACKEND': 'django.template.backends.django.DjangoTemplates',
71 'DIRS': [],
72 'APP_DIRS': True,
73 'OPTIONS': {
74 'context_processors': [
75 'django.template.context_processors.debug',
76 'django.template.context_processors.request',
77 'django.contrib.auth.context_processors.auth',
78 'django.contrib.messages.context_processors.messages',
79 'backend.context_processors.region_slug_processor',
80 ],
81 },
82 },
83 ]
84
85 WSGI_APPLICATION = 'backend.wsgi.application'
86
87
88 # Database
89 # https://docs.djangoproject.com/en/1.11/ref/settings/#databases
90
91 DATABASES = {
92 'default': {
93 'ENGINE': 'django.db.backends.postgresql_psycopg2',
94 'NAME': 'integreat',
95 'USER': 'integreat',
96 'PASSWORD': 'password',
97 'HOST': 'localhost',
98 'PORT': '5433',
99 }
100 }
101
102 # Directory for initial database contents
103
104 FIXTURE_DIRS = (
105 os.path.join(BASE_DIR, 'cms/fixtures/'),
106 )
107
108 # Authentication backends
109
110 AUTHENTICATION_BACKENDS = (
111 'rules.permissions.ObjectPermissionBackend',
112 'django.contrib.auth.backends.ModelBackend', # this is default
113 )
114
115
116 # Password validation
117 # https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
118
119 AUTH_PASSWORD_VALIDATORS = [
120 {
121 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
122 },
123 {
124 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
125 },
126 {
127 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
128 },
129 {
130 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
131 },
132 ]
133
134
135 # Internationalization
136 # https://docs.djangoproject.com/en/1.11/topics/i18n/
137
138 LANGUAGES = (
139 ('en-us', 'English'),
140 ('de-de', 'Deutsch'),
141 )
142
143 LOCALE_PATHS = (
144 os.path.join(BASE_DIR, 'locale'),
145 )
146
147 LANGUAGE_CODE = 'de-de'
148
149 TIME_ZONE = 'UTC'
150
151 USE_I18N = True
152
153 USE_L10N = True
154
155 USE_TZ = True
156
157
158 # Static files (CSS, JavaScript, Images)
159 # https://docs.djangoproject.com/en/1.11/howto/static-files/
160
161 STATIC_URL = '/static/'
162
163
164 # Login
165 LOGIN_URL = '/login'
166 LOGIN_REDIRECT_URL = '/'
167 LOGOUT_REDIRECT_URL = '/login'
168
169 # API FRAMEWORK
170 REST_FRAMEWORK = {
171 # Use Django's standard `django.contrib.auth` permissions,
172 # or allow read-only access for unauthenticated users.
173 'DEFAULT_PERMISSION_CLASSES': [
174 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
175 ]
176 }
177
178 # Miscellaneous
179 EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
180 CSRF_FAILURE_VIEW = 'cms.views.general.csrf_failure'
181
182 MEDIA_URL = '/media/'
183 MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
184 FILER_CANONICAL_URL = 'media/'
185
186 LOGGING = {
187 'version': 1,
188 'disable_existing_loggers': False,
189 'handlers': {
190 'console': {
191 'class': 'logging.StreamHandler'
192 },
193 },
194 'loggers': {
195 'django': {
196 'handlers': ['console'],
197 'level': 'WARN',
198 'propagate': True,
199 },
200 'api': {
201 'handlers': ['console'],
202 'level': 'INFO',
203 'propagate': True,
204 },
205 'cms': {
206 'handlers': ['console'],
207 'level': 'INFO',
208 'propagate': True,
209 },
210 'rules': {
211 'handlers': ['console'],
212 'level': 'DEBUG',
213 'propagate': True,
214 },
215 }
216 }
217
[end of backend/backend/settings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/backend/backend/docker_settings.py b/backend/backend/docker_settings.py
--- a/backend/backend/docker_settings.py
+++ b/backend/backend/docker_settings.py
@@ -2,16 +2,13 @@
# pylint: disable=unused-wildcard-import
from .settings import *
-# Database
-# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
-
-DATABASE_DICT = {
+DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'integreat',
'USER': 'integreat',
'PASSWORD': 'password',
'HOST': 'localhost',
- 'PORT': '5432',
+ 'PORT': '5433',
}
}
diff --git a/backend/backend/settings.py b/backend/backend/settings.py
--- a/backend/backend/settings.py
+++ b/backend/backend/settings.py
@@ -95,7 +95,7 @@
'USER': 'integreat',
'PASSWORD': 'password',
'HOST': 'localhost',
- 'PORT': '5433',
+ 'PORT': '5432',
}
}
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -28,14 +28,18 @@
"lxml==4.3.3",
"packaging==19.0",
"psycopg2-binary==2.8.3",
- "pylint==2.3.1",
- "pylint-django==2.0.11",
- "pylint_runner==0.5.4",
"python-dateutil==2.8.0",
"requests==2.22.0",
"rules==2.0.1",
"six==1.11.0",
],
+ extras_require={
+ "dev": [
+ "pylint==2.3.1",
+ "pylint-django==2.0.11",
+ "pylint_runner==0.5.4",
+ ]
+ },
author="Integreat App Project",
author_email="[email protected]",
description="Content Management System for the Integreat App",
| {"golden_diff": "diff --git a/backend/backend/docker_settings.py b/backend/backend/docker_settings.py\n--- a/backend/backend/docker_settings.py\n+++ b/backend/backend/docker_settings.py\n@@ -2,16 +2,13 @@\n # pylint: disable=unused-wildcard-import\n from .settings import *\n \n-# Database\n-# https://docs.djangoproject.com/en/1.11/ref/settings/#databases\n-\n-DATABASE_DICT = {\n+DATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': 'integreat',\n 'USER': 'integreat',\n 'PASSWORD': 'password',\n 'HOST': 'localhost',\n- 'PORT': '5432',\n+ 'PORT': '5433',\n }\n }\ndiff --git a/backend/backend/settings.py b/backend/backend/settings.py\n--- a/backend/backend/settings.py\n+++ b/backend/backend/settings.py\n@@ -95,7 +95,7 @@\n 'USER': 'integreat',\n 'PASSWORD': 'password',\n 'HOST': 'localhost',\n- 'PORT': '5433',\n+ 'PORT': '5432',\n }\n }\n \ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -28,14 +28,18 @@\n \"lxml==4.3.3\",\n \"packaging==19.0\",\n \"psycopg2-binary==2.8.3\",\n- \"pylint==2.3.1\",\n- \"pylint-django==2.0.11\",\n- \"pylint_runner==0.5.4\",\n \"python-dateutil==2.8.0\",\n \"requests==2.22.0\",\n \"rules==2.0.1\",\n \"six==1.11.0\",\n ],\n+ extras_require={\n+ \"dev\": [\n+ \"pylint==2.3.1\",\n+ \"pylint-django==2.0.11\",\n+ \"pylint_runner==0.5.4\",\n+ ]\n+ },\n author=\"Integreat App Project\",\n author_email=\"[email protected]\",\n description=\"Content Management System for the Integreat App\",\n", "issue": "Provide Test Data\nFor developing and testing purposes, we need a fast way to import valid test data into the database. I propose using fixtures or a suitable library for this.\nProvide Test Data\nFor developing and testing purposes, we need a fast way to import valid test data into the database. 
I propose using fixtures or a suitable library for this.\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport os\nfrom setuptools import find_packages, setup\n\nsetup(\n name=\"integreat_cms\",\n version=\"0.0.13\",\n packages=find_packages(\"backend\"),\n package_dir={'':'backend'},\n include_package_data=True,\n scripts=['backend/integreat-cms'],\n data_files= [(\"lib/integreat-{}\".format(root), [os.path.join(root, f) for f in files])\n for root, dirs, files in os.walk('backend/cms/templates/')] +\n [(\"lib/integreat-{}\".format(root), [os.path.join(root, f) for f in files])\n for root, dirs, files in os.walk('backend/cms/static/')] +\n [('usr/lib/systemd/system/', ['systemd/[email protected]'])],\n install_requires=[\n \"beautifulsoup4==4.8.0\",\n \"cffi==1.12.3\",\n \"Django==1.11.20\",\n \"django-filer==1.5.0\",\n \"django-mptt==0.9.1\",\n \"django-widget-tweaks==1.4.3\",\n \"djangorestframework==3.9.4\",\n \"drf-yasg==1.16.1\",\n \"idna==2.6\",\n \"lxml==4.3.3\",\n \"packaging==19.0\",\n \"psycopg2-binary==2.8.3\",\n \"pylint==2.3.1\",\n \"pylint-django==2.0.11\",\n \"pylint_runner==0.5.4\",\n \"python-dateutil==2.8.0\",\n \"requests==2.22.0\",\n \"rules==2.0.1\",\n \"six==1.11.0\",\n ],\n author=\"Integreat App Project\",\n author_email=\"[email protected]\",\n description=\"Content Management System for the Integreat App\",\n license=\"GPL-2.0-or-later\",\n keywords=\"Django Integreat CMS\",\n url=\"http://github.com/Integreat/\",\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ]\n)\n", "path": "setup.py"}, {"content": "# pylint: disable=wildcard-import\n# pylint: disable=unused-wildcard-import\nfrom .settings import *\n\n# Database\n# https://docs.djangoproject.com/en/1.11/ref/settings/#databases\n\nDATABASE_DICT = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': 'integreat',\n 'USER': 'integreat',\n 'PASSWORD': 'password',\n 'HOST': 'localhost',\n 'PORT': '5432',\n }\n}\n", "path": "backend/backend/docker_settings.py"}, {"content": "\"\"\"\nDjango settings for backend project.\n\nGenerated by 'django-admin startproject' using Django 1.11.11.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.11/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.11/ref/settings/\n\"\"\"\n\nimport os\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = '-!v282$zj815_q@htaxcubylo)(l%a+k*-xi78hw*#s2@i86@_'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nALLOWED_HOSTS = [\n 'localhost',\n '127.0.0.1',\n '0.0.0.0'\n]\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'cms.apps.CmsConfig',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.messages',\n 'django.contrib.sessions',\n 'django.contrib.staticfiles',\n 'widget_tweaks',\n 'easy_thumbnails',\n 'filer',\n 'drf_yasg',\n 'mptt',\n 'rest_framework',\n 'rules.apps.AutodiscoverRulesConfig',\n]\n\nMIDDLEWARE = [\n 
'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'backend.urls'\nTHUMBNAIL_HIGH_RESOLUTION = True\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'backend.context_processors.region_slug_processor',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'backend.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.11/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': 'integreat',\n 'USER': 'integreat',\n 'PASSWORD': 'password',\n 'HOST': 'localhost',\n 'PORT': '5433',\n }\n}\n\n# Directory for initial database contents\n\nFIXTURE_DIRS = (\n os.path.join(BASE_DIR, 'cms/fixtures/'),\n)\n\n# Authentication backends\n\nAUTHENTICATION_BACKENDS = (\n 'rules.permissions.ObjectPermissionBackend',\n 'django.contrib.auth.backends.ModelBackend', # this is default\n)\n\n\n# Password validation\n# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.11/topics/i18n/\n\nLANGUAGES = (\n ('en-us', 'English'),\n ('de-de', 'Deutsch'),\n)\n\nLOCALE_PATHS = (\n os.path.join(BASE_DIR, 'locale'),\n)\n\nLANGUAGE_CODE = 'de-de'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.11/howto/static-files/\n\nSTATIC_URL = '/static/'\n\n\n# Login\nLOGIN_URL = '/login'\nLOGIN_REDIRECT_URL = '/'\nLOGOUT_REDIRECT_URL = '/login'\n\n# API FRAMEWORK\nREST_FRAMEWORK = {\n # Use Django's standard `django.contrib.auth` permissions,\n # or allow read-only access for unauthenticated users.\n 'DEFAULT_PERMISSION_CLASSES': [\n 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'\n ]\n}\n\n# Miscellaneous\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\nCSRF_FAILURE_VIEW = 'cms.views.general.csrf_failure'\n\nMEDIA_URL = '/media/'\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\nFILER_CANONICAL_URL = 'media/'\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler'\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'level': 'WARN',\n 'propagate': True,\n },\n 'api': {\n 'handlers': ['console'],\n 'level': 'INFO',\n 'propagate': True,\n },\n 'cms': {\n 'handlers': ['console'],\n 'level': 'INFO',\n 'propagate': True,\n },\n 'rules': {\n 'handlers': ['console'],\n 
'level': 'DEBUG',\n 'propagate': True,\n },\n }\n}\n", "path": "backend/backend/settings.py"}]} | 3,241 | 505 |
gh_patches_debug_23057 | rasdani/github-patches | git_diff | goauthentik__authentik-8738 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
no webauthn device registration possible
**Describe the bug**
If a webauthn device is already registered, authentication works without any issue.
But as long as no webauthn device is configured, one can neither register one through the enforced validation flow nor manually in the user profile.
**To Reproduce**
Steps to reproduce the behavior:
1. delete webauthn device
2. logout
3. login
4. try to add webauthn device in user profile settings
**Expected behavior**
webauthn device can be registered
**Logs**
<details>
<summary>Stacktrace from authentik</summary>
```
Traceback (most recent call last):
File "/authentik/flows/views/executor.py", line 291, in get
stage_response = self.current_stage_view.dispatch(request)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/ak-root/venv/lib/python3.12/site-packages/django/views/generic/base.py", line 143, in dispatch
return handler(request, *args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/authentik/flows/stage.py", line 95, in get
challenge = self._get_challenge(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/authentik/flows/stage.py", line 172, in _get_challenge
challenge = self.get_challenge(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/authentik/stages/authenticator_webauthn/stage.py", line 114, in get_challenge
"registration": loads(options_to_json(registration_options)),
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/ak-root/venv/lib/python3.12/site-packages/webauthn/helpers/options_to_json.py", line 71, in options_to_json
] = _selection.authenticator_attachment.value
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
builtins.AttributeError: 'str' object has no attribute 'value'
```
</details>
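
The last frame shows `options_to_json` calling `.value` on `authenticator_attachment`, so it appears to expect webauthn's `AuthenticatorAttachment` enum while the stage hands it a plain string. A minimal sketch of that mismatch, assuming the `webauthn` helper structs shipped with this version (the `stored_value` variable is hypothetical):

```python
from webauthn.helpers.structs import (
    AuthenticatorAttachment,
    AuthenticatorSelectionCriteria,
)

# The stage stores the attachment preference as a plain string, e.g. "platform";
# options_to_json() later calls .value on it, which a str does not have.
stored_value = "platform"

# Converting the stored string into the enum before building the criteria
# gives options_to_json() the object it expects.
selection = AuthenticatorSelectionCriteria(
    authenticator_attachment=AuthenticatorAttachment(stored_value)
)
```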
**Version and Deployment (please complete the following information):**
- authentik version: 2024.2.1
- Deployment: helm
</issue>
<code>
[start of authentik/stages/authenticator_webauthn/stage.py]
1 """WebAuthn stage"""
2
3 from json import loads
4
5 from django.http import HttpRequest, HttpResponse
6 from django.http.request import QueryDict
7 from rest_framework.fields import CharField
8 from rest_framework.serializers import ValidationError
9 from webauthn import options_to_json
10 from webauthn.helpers.bytes_to_base64url import bytes_to_base64url
11 from webauthn.helpers.exceptions import InvalidRegistrationResponse
12 from webauthn.helpers.structs import (
13 AuthenticatorSelectionCriteria,
14 PublicKeyCredentialCreationOptions,
15 ResidentKeyRequirement,
16 UserVerificationRequirement,
17 )
18 from webauthn.registration.generate_registration_options import generate_registration_options
19 from webauthn.registration.verify_registration_response import (
20 VerifiedRegistration,
21 verify_registration_response,
22 )
23
24 from authentik.core.api.utils import JSONDictField
25 from authentik.core.models import User
26 from authentik.flows.challenge import (
27 Challenge,
28 ChallengeResponse,
29 ChallengeTypes,
30 WithUserInfoChallenge,
31 )
32 from authentik.flows.stage import ChallengeStageView
33 from authentik.stages.authenticator_webauthn.models import AuthenticateWebAuthnStage, WebAuthnDevice
34 from authentik.stages.authenticator_webauthn.utils import get_origin, get_rp_id
35
36 SESSION_KEY_WEBAUTHN_CHALLENGE = "authentik/stages/authenticator_webauthn/challenge"
37
38
39 class AuthenticatorWebAuthnChallenge(WithUserInfoChallenge):
40 """WebAuthn Challenge"""
41
42 registration = JSONDictField()
43 component = CharField(default="ak-stage-authenticator-webauthn")
44
45
46 class AuthenticatorWebAuthnChallengeResponse(ChallengeResponse):
47 """WebAuthn Challenge response"""
48
49 response = JSONDictField()
50 component = CharField(default="ak-stage-authenticator-webauthn")
51
52 request: HttpRequest
53 user: User
54
55 def validate_response(self, response: dict) -> dict:
56 """Validate webauthn challenge response"""
57 challenge = self.request.session[SESSION_KEY_WEBAUTHN_CHALLENGE]
58
59 try:
60 registration: VerifiedRegistration = verify_registration_response(
61 credential=response,
62 expected_challenge=challenge,
63 expected_rp_id=get_rp_id(self.request),
64 expected_origin=get_origin(self.request),
65 )
66 except InvalidRegistrationResponse as exc:
67 self.stage.logger.warning("registration failed", exc=exc)
68 raise ValidationError(f"Registration failed. Error: {exc}") from None
69
70 credential_id_exists = WebAuthnDevice.objects.filter(
71 credential_id=bytes_to_base64url(registration.credential_id)
72 ).first()
73 if credential_id_exists:
74 raise ValidationError("Credential ID already exists.")
75
76 return registration
77
78
79 class AuthenticatorWebAuthnStageView(ChallengeStageView):
80 """WebAuthn stage"""
81
82 response_class = AuthenticatorWebAuthnChallengeResponse
83
84 def get_challenge(self, *args, **kwargs) -> Challenge:
85 # clear session variables prior to starting a new registration
86 self.request.session.pop(SESSION_KEY_WEBAUTHN_CHALLENGE, None)
87 stage: AuthenticateWebAuthnStage = self.executor.current_stage
88 user = self.get_pending_user()
89
90 # library accepts none so we store null in the database, but if there is a value
91 # set, cast it to string to ensure it's not a django class
92 authenticator_attachment = stage.authenticator_attachment
93 if authenticator_attachment:
94 authenticator_attachment = str(authenticator_attachment)
95
96 registration_options: PublicKeyCredentialCreationOptions = generate_registration_options(
97 rp_id=get_rp_id(self.request),
98 rp_name=self.request.brand.branding_title,
99 user_id=user.uid.encode("utf-8"),
100 user_name=user.username,
101 user_display_name=user.name,
102 authenticator_selection=AuthenticatorSelectionCriteria(
103 resident_key=ResidentKeyRequirement(str(stage.resident_key_requirement)),
104 user_verification=UserVerificationRequirement(str(stage.user_verification)),
105 authenticator_attachment=authenticator_attachment,
106 ),
107 )
108
109 self.request.session[SESSION_KEY_WEBAUTHN_CHALLENGE] = registration_options.challenge
110 self.request.session.save()
111 return AuthenticatorWebAuthnChallenge(
112 data={
113 "type": ChallengeTypes.NATIVE.value,
114 "registration": loads(options_to_json(registration_options)),
115 }
116 )
117
118 def get_response_instance(self, data: QueryDict) -> AuthenticatorWebAuthnChallengeResponse:
119 response: AuthenticatorWebAuthnChallengeResponse = super().get_response_instance(data)
120 response.request = self.request
121 response.user = self.get_pending_user()
122 return response
123
124 def challenge_valid(self, response: ChallengeResponse) -> HttpResponse:
125 # Webauthn Challenge has already been validated
126 webauthn_credential: VerifiedRegistration = response.validated_data["response"]
127 existing_device = WebAuthnDevice.objects.filter(
128 credential_id=bytes_to_base64url(webauthn_credential.credential_id)
129 ).first()
130 if not existing_device:
131 WebAuthnDevice.objects.create(
132 user=self.get_pending_user(),
133 public_key=bytes_to_base64url(webauthn_credential.credential_public_key),
134 credential_id=bytes_to_base64url(webauthn_credential.credential_id),
135 sign_count=webauthn_credential.sign_count,
136 rp_id=get_rp_id(self.request),
137 name="WebAuthn Device",
138 )
139 else:
140 return self.executor.stage_invalid("Device with Credential ID already exists.")
141 return self.executor.stage_ok()
142
143 def cleanup(self):
144 self.request.session.pop(SESSION_KEY_WEBAUTHN_CHALLENGE, None)
145
[end of authentik/stages/authenticator_webauthn/stage.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/authentik/stages/authenticator_webauthn/stage.py b/authentik/stages/authenticator_webauthn/stage.py
--- a/authentik/stages/authenticator_webauthn/stage.py
+++ b/authentik/stages/authenticator_webauthn/stage.py
@@ -10,6 +10,7 @@
from webauthn.helpers.bytes_to_base64url import bytes_to_base64url
from webauthn.helpers.exceptions import InvalidRegistrationResponse
from webauthn.helpers.structs import (
+ AuthenticatorAttachment,
AuthenticatorSelectionCriteria,
PublicKeyCredentialCreationOptions,
ResidentKeyRequirement,
@@ -91,7 +92,7 @@
# set, cast it to string to ensure it's not a django class
authenticator_attachment = stage.authenticator_attachment
if authenticator_attachment:
- authenticator_attachment = str(authenticator_attachment)
+ authenticator_attachment = AuthenticatorAttachment(str(authenticator_attachment))
registration_options: PublicKeyCredentialCreationOptions = generate_registration_options(
rp_id=get_rp_id(self.request),
| {"golden_diff": "diff --git a/authentik/stages/authenticator_webauthn/stage.py b/authentik/stages/authenticator_webauthn/stage.py\n--- a/authentik/stages/authenticator_webauthn/stage.py\n+++ b/authentik/stages/authenticator_webauthn/stage.py\n@@ -10,6 +10,7 @@\n from webauthn.helpers.bytes_to_base64url import bytes_to_base64url\n from webauthn.helpers.exceptions import InvalidRegistrationResponse\n from webauthn.helpers.structs import (\n+ AuthenticatorAttachment,\n AuthenticatorSelectionCriteria,\n PublicKeyCredentialCreationOptions,\n ResidentKeyRequirement,\n@@ -91,7 +92,7 @@\n # set, cast it to string to ensure it's not a django class\n authenticator_attachment = stage.authenticator_attachment\n if authenticator_attachment:\n- authenticator_attachment = str(authenticator_attachment)\n+ authenticator_attachment = AuthenticatorAttachment(str(authenticator_attachment))\n \n registration_options: PublicKeyCredentialCreationOptions = generate_registration_options(\n rp_id=get_rp_id(self.request),\n", "issue": "no webauthn device register possible\n**Describe the bug**\r\nIf webauthn device is already registered authentication works without any issue.\r\n\r\nBut as long as no webauthn device is configured one can neither enforce it in the validation flow nor manually in the user profile.\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. delete webauthn device\r\n2. logout\r\n3. login\r\n4. try to add webauthn device in user profile settings\r\n\r\n**Expected behavior**\r\nwebauthn device can be registered\r\n\r\n**Logs**\r\n<details>\r\n <summary>Stacktrace from authentik</summary>\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/authentik/flows/views/executor.py\", line 291, in get\r\n stage_response = self.current_stage_view.dispatch(request)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.12/site-packages/django/views/generic/base.py\", line 143, in dispatch\r\n return handler(request, *args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/authentik/flows/stage.py\", line 95, in get\r\n challenge = self._get_challenge(*args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/authentik/flows/stage.py\", line 172, in _get_challenge\r\n challenge = self.get_challenge(*args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/authentik/stages/authenticator_webauthn/stage.py\", line 114, in get_challenge\r\n \"registration\": loads(options_to_json(registration_options)),\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.12/site-packages/webauthn/helpers/options_to_json.py\", line 71, in options_to_json\r\n ] = _selection.authenticator_attachment.value\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\nbuiltins.AttributeError: 'str' object has no attribute 'value'\r\n```\r\n</details>\r\n\r\n\r\n**Version and Deployment (please complete the following information):**\r\n- authentik version: 2024.2.1\r\n- Deployment: helm\r\n\r\n\n", "before_files": [{"content": "\"\"\"WebAuthn stage\"\"\"\n\nfrom json import loads\n\nfrom django.http import HttpRequest, HttpResponse\nfrom django.http.request import QueryDict\nfrom rest_framework.fields import CharField\nfrom rest_framework.serializers import ValidationError\nfrom webauthn import options_to_json\nfrom webauthn.helpers.bytes_to_base64url import bytes_to_base64url\nfrom webauthn.helpers.exceptions import InvalidRegistrationResponse\nfrom webauthn.helpers.structs import (\n AuthenticatorSelectionCriteria,\n 
PublicKeyCredentialCreationOptions,\n ResidentKeyRequirement,\n UserVerificationRequirement,\n)\nfrom webauthn.registration.generate_registration_options import generate_registration_options\nfrom webauthn.registration.verify_registration_response import (\n VerifiedRegistration,\n verify_registration_response,\n)\n\nfrom authentik.core.api.utils import JSONDictField\nfrom authentik.core.models import User\nfrom authentik.flows.challenge import (\n Challenge,\n ChallengeResponse,\n ChallengeTypes,\n WithUserInfoChallenge,\n)\nfrom authentik.flows.stage import ChallengeStageView\nfrom authentik.stages.authenticator_webauthn.models import AuthenticateWebAuthnStage, WebAuthnDevice\nfrom authentik.stages.authenticator_webauthn.utils import get_origin, get_rp_id\n\nSESSION_KEY_WEBAUTHN_CHALLENGE = \"authentik/stages/authenticator_webauthn/challenge\"\n\n\nclass AuthenticatorWebAuthnChallenge(WithUserInfoChallenge):\n \"\"\"WebAuthn Challenge\"\"\"\n\n registration = JSONDictField()\n component = CharField(default=\"ak-stage-authenticator-webauthn\")\n\n\nclass AuthenticatorWebAuthnChallengeResponse(ChallengeResponse):\n \"\"\"WebAuthn Challenge response\"\"\"\n\n response = JSONDictField()\n component = CharField(default=\"ak-stage-authenticator-webauthn\")\n\n request: HttpRequest\n user: User\n\n def validate_response(self, response: dict) -> dict:\n \"\"\"Validate webauthn challenge response\"\"\"\n challenge = self.request.session[SESSION_KEY_WEBAUTHN_CHALLENGE]\n\n try:\n registration: VerifiedRegistration = verify_registration_response(\n credential=response,\n expected_challenge=challenge,\n expected_rp_id=get_rp_id(self.request),\n expected_origin=get_origin(self.request),\n )\n except InvalidRegistrationResponse as exc:\n self.stage.logger.warning(\"registration failed\", exc=exc)\n raise ValidationError(f\"Registration failed. 
Error: {exc}\") from None\n\n credential_id_exists = WebAuthnDevice.objects.filter(\n credential_id=bytes_to_base64url(registration.credential_id)\n ).first()\n if credential_id_exists:\n raise ValidationError(\"Credential ID already exists.\")\n\n return registration\n\n\nclass AuthenticatorWebAuthnStageView(ChallengeStageView):\n \"\"\"WebAuthn stage\"\"\"\n\n response_class = AuthenticatorWebAuthnChallengeResponse\n\n def get_challenge(self, *args, **kwargs) -> Challenge:\n # clear session variables prior to starting a new registration\n self.request.session.pop(SESSION_KEY_WEBAUTHN_CHALLENGE, None)\n stage: AuthenticateWebAuthnStage = self.executor.current_stage\n user = self.get_pending_user()\n\n # library accepts none so we store null in the database, but if there is a value\n # set, cast it to string to ensure it's not a django class\n authenticator_attachment = stage.authenticator_attachment\n if authenticator_attachment:\n authenticator_attachment = str(authenticator_attachment)\n\n registration_options: PublicKeyCredentialCreationOptions = generate_registration_options(\n rp_id=get_rp_id(self.request),\n rp_name=self.request.brand.branding_title,\n user_id=user.uid.encode(\"utf-8\"),\n user_name=user.username,\n user_display_name=user.name,\n authenticator_selection=AuthenticatorSelectionCriteria(\n resident_key=ResidentKeyRequirement(str(stage.resident_key_requirement)),\n user_verification=UserVerificationRequirement(str(stage.user_verification)),\n authenticator_attachment=authenticator_attachment,\n ),\n )\n\n self.request.session[SESSION_KEY_WEBAUTHN_CHALLENGE] = registration_options.challenge\n self.request.session.save()\n return AuthenticatorWebAuthnChallenge(\n data={\n \"type\": ChallengeTypes.NATIVE.value,\n \"registration\": loads(options_to_json(registration_options)),\n }\n )\n\n def get_response_instance(self, data: QueryDict) -> AuthenticatorWebAuthnChallengeResponse:\n response: AuthenticatorWebAuthnChallengeResponse = super().get_response_instance(data)\n response.request = self.request\n response.user = self.get_pending_user()\n return response\n\n def challenge_valid(self, response: ChallengeResponse) -> HttpResponse:\n # Webauthn Challenge has already been validated\n webauthn_credential: VerifiedRegistration = response.validated_data[\"response\"]\n existing_device = WebAuthnDevice.objects.filter(\n credential_id=bytes_to_base64url(webauthn_credential.credential_id)\n ).first()\n if not existing_device:\n WebAuthnDevice.objects.create(\n user=self.get_pending_user(),\n public_key=bytes_to_base64url(webauthn_credential.credential_public_key),\n credential_id=bytes_to_base64url(webauthn_credential.credential_id),\n sign_count=webauthn_credential.sign_count,\n rp_id=get_rp_id(self.request),\n name=\"WebAuthn Device\",\n )\n else:\n return self.executor.stage_invalid(\"Device with Credential ID already exists.\")\n return self.executor.stage_ok()\n\n def cleanup(self):\n self.request.session.pop(SESSION_KEY_WEBAUTHN_CHALLENGE, None)\n", "path": "authentik/stages/authenticator_webauthn/stage.py"}]} | 2,541 | 231 |
gh_patches_debug_16336 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-2723 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[tensor] fix some unittests
[tensor] fix some unittests
[tensor] fix some unittests
</issue>
<code>
[start of colossalai/auto_parallel/tensor_shard/deprecated/op_handler/where_handler.py]
1 import operator
2 import warnings
3 from copy import deepcopy
4 from functools import reduce
5 from typing import Dict, List
6
7 import torch
8
9 from colossalai.auto_parallel.tensor_shard.deprecated._utils import (enumerate_all_possible_1d_sharding,
10 enumerate_all_possible_2d_sharding,
11 ignore_sharding_exception)
12 from colossalai.auto_parallel.tensor_shard.deprecated.sharding_strategy import (ShardingStrategy, StrategiesVector)
13 from colossalai.tensor.shape_consistency import ShapeConsistencyManager
14 from colossalai.tensor.sharding_spec import ShardingSpec
15
16 from .operator_handler import OperatorHandler
17
18 __all__ = ['WhereHandler']
19
20
21 class WhereHandler(OperatorHandler):
22 """
23 An OperatorHandler which deals with the sharding strategies of torch.where.
24 """
25
26 def __init__(self, *args, **kwargs):
27 # TODO: x or y could be scalar
28 super().__init__(*args, **kwargs)
29 assert len(self.predecessor_node) == 3
30 self.condition_data = self.predecessor_node[0]._meta_data
31 self.x_data = self.predecessor_node[1]._meta_data
32 self.y_data = self.predecessor_node[2]._meta_data
33 self.condition = self.predecessor_node[0]
34 self.x = self.predecessor_node[1]
35 self.y = self.predecessor_node[2]
36 self.output_data = self.node._meta_data
37
38 def _generate_sharding_spec(self, input_: torch.Tensor, dim_partition_dict: Dict[int, List[int]]) -> ShardingSpec:
39 shape = list(input_.shape)
40
41 # padding the shape to the same length as output_data
42 while len(shape) < self.output_data.dim():
43 shape.insert(0, 1)
44 shape = torch.Size(shape)
45
46 # if the sharding happens on a size one dimension, we should record it as R.
47 processed_dim_partition_dict = deepcopy(dim_partition_dict)
48 for dim_index, _ in dim_partition_dict.items():
49 if shape[dim_index] == 1:
50 processed_dim_partition_dict.pop(dim_index)
51 for dim_index, sharding_index_list in processed_dim_partition_dict.items():
52 sharding_list = [self.device_mesh.mesh_shape[sharding_index] for sharding_index in sharding_index_list]
53 sharding_size = reduce(operator.mul, sharding_list, 1)
54 assert shape[
55 dim_index] % sharding_size == 0, f'we cannot shard the {dim_index} dimension of tensor into {sharding_size} partitions.'
56 sharding_spec = ShardingSpec(device_mesh=self.device_mesh,
57 entire_shape=shape,
58 dim_partition_dict=processed_dim_partition_dict)
59
60 return sharding_spec
61
62 def _generate_compute_cost(self, total_sharding_size):
63 lhs_matrix_shape = self.lhs_data.shape[-2:]
64 rhs_matrix_shape = self.rhs_data.shape[-2:]
65 batch_dimensions_shape = self.output_data.shape[:-2]
66 batch_dimensions_product = reduce(operator.mul, batch_dimensions_shape, 1)
67 compute_cost = reduce(
68 operator.mul, lhs_matrix_shape) * rhs_matrix_shape[0] * batch_dimensions_product * 2 / total_sharding_size
69 return compute_cost
70
71 def _generate_resharding_costs(self, sharding_specs):
72 # The resharding_cost of weight is counted due to sharing weight cases.
73 dtype = self.node._meta_data.dtype
74 nodes = self.predecessor_node
75 resharding_costs = {}
76 size_per_elem_bytes = torch.tensor([], dtype=dtype).element_size()
77
78 # shape consistency manager is a singleton class
79 shape_consistency_manager = ShapeConsistencyManager()
80
81 for input_node, input_spec in zip(nodes, sharding_specs):
82 resharding_costs[input_node] = []
83 for strategy in input_node.strategies_vector:
84 input_sharding_spec = strategy.output_sharding_spec
85 assert isinstance(input_sharding_spec, ShardingSpec), f'The input node should NOT be a tuple of tensor.'
86 # if the input shape is smaller than the target input, we will fill the input to the same length as target.
87 # Then, use the padded input sharding spec to compute the resharding cost.
88 if len(input_sharding_spec.entire_shape) < len(input_spec.entire_shape):
89 new_entire_shape = list(input_sharding_spec.entire_shape)
90 while len(new_entire_shape) < len(input_spec.entire_shape):
91 new_entire_shape.insert(0, 1)
92 new_entire_shape = torch.Size(new_entire_shape)
93 new_device_mesh = input_sharding_spec.device_mesh
94 new_dim_partition_dict = input_sharding_spec.dim_partition_dict
95 input_sharding_spec = ShardingSpec(device_mesh=new_device_mesh,
96 entire_shape=new_entire_shape,
97 dim_partition_dict=new_dim_partition_dict)
98
99 # compute the resharding cost
100 _, _, total_resharding_cost = shape_consistency_manager.shape_consistency(
101 input_sharding_spec, input_spec)
102 total_resharding_cost = total_resharding_cost['total']
103 # we need multiply the size of elem dtype to get correct communication cost
104 resharding_cost = total_resharding_cost * size_per_elem_bytes
105 resharding_costs[input_node].append(resharding_cost)
106
107 return resharding_costs
108
109 def _convert_partition_dict_to_sharding_spec(self, dim_partition_list):
110
111 sharding_spec_list = []
112 check_duplicated_list = []
113 for output_dim_partition_dict in dim_partition_list:
114 try:
115 output_sharding_spec = self._generate_sharding_spec(self.output_data, output_dim_partition_dict)
116 except AssertionError as e:
117 warnings.warn(f'{e}')
118 break
119 sharding_seq = output_sharding_spec.sharding_sequence
120 if sharding_seq not in check_duplicated_list:
121 check_duplicated_list.append(sharding_seq)
122 sharding_spec_list.append(output_sharding_spec)
123
124 return sharding_spec_list
125
126 def _enumerate_all_possible_output(self, mesh_dim_0, mesh_dim_1):
127         # use mesh_dim_0, mesh_dim_1 instead of constant 0, 1 in here for N-D device mesh scalability.
128
129 output_dim_partition_list = []
130 dim_size = self.output_data.dim()
131 # enumerate all the 2D sharding cases
132 sharding_list_2d = enumerate_all_possible_2d_sharding(mesh_dim_0, mesh_dim_1, dim_size)
133 output_dim_partition_list.extend(sharding_list_2d)
134
135 # enumerate all the 1D sharding cases
136 sharding_list_1d_on_dim_0 = enumerate_all_possible_1d_sharding(mesh_dim_0, dim_size)
137 output_dim_partition_list.extend(sharding_list_1d_on_dim_0)
138 sharding_list_1d_on_dim_1 = enumerate_all_possible_1d_sharding(mesh_dim_1, dim_size)
139 output_dim_partition_list.extend(sharding_list_1d_on_dim_1)
140
141 # add empty dict for fully replicated case
142 output_dim_partition_list.append({})
143 output_sharding_spec_list = self._convert_partition_dict_to_sharding_spec(output_dim_partition_list)
144
145 return output_sharding_spec_list
146
147 @ignore_sharding_exception
148 def _register_strategy(self, output_sharding_spec):
149 dim_partition_dict_for_input = output_sharding_spec.dim_partition_dict
150 sharding_spec_for_condition = self._generate_sharding_spec(self.condition_data, dim_partition_dict_for_input)
151 sharding_spec_for_x = self._generate_sharding_spec(self.x_data, dim_partition_dict_for_input)
152 sharding_spec_for_y = self._generate_sharding_spec(self.y_data, dim_partition_dict_for_input)
153
154 name = f'{output_sharding_spec.sharding_sequence} = {sharding_spec_for_condition.sharding_sequence} x {sharding_spec_for_x.sharding_sequence} x {sharding_spec_for_y.sharding_sequence}'
155 dim_partition_dict_for_output = output_sharding_spec.dim_partition_dict
156
157 # generate resharding cost for this strategy
158 resharding_costs = self._generate_resharding_costs(
159 [sharding_spec_for_condition, sharding_spec_for_x, sharding_spec_for_y])
160
161 # compute the computation cost of this strategy
162 sharding_dims = []
163 for mesh_dims in dim_partition_dict_for_output.values():
164 for mesh_dim in mesh_dims:
165 sharding_dims.append(self.device_mesh.shape[mesh_dim])
166 sharding_size = reduce(operator.mul, sharding_dims, 1)
167 memory_cost = self.output_data.numel() / sharding_size
168 compute_cost = memory_cost
169 communication_cost = 0
170
171 sharding_strategies = ShardingStrategy(name,
172 output_sharding_spec=output_sharding_spec,
173 compute_cost=compute_cost,
174 communication_cost=communication_cost,
175 memory_cost=memory_cost,
176 resharding_costs=resharding_costs,
177 input_shardings=(sharding_spec_for_condition, sharding_spec_for_x,
178 sharding_spec_for_y))
179
180 self.strategies_vector.append(sharding_strategies)
181
182 def register_strategy(self) -> StrategiesVector:
183 MESH_DIM_LIST = [0, 1]
184 output_sharding_specs = self._enumerate_all_possible_output(MESH_DIM_LIST[0], MESH_DIM_LIST[1])
185 for output_sharding_spec in output_sharding_specs:
186 self._register_strategy(output_sharding_spec)
187
[end of colossalai/auto_parallel/tensor_shard/deprecated/op_handler/where_handler.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/colossalai/auto_parallel/tensor_shard/deprecated/op_handler/where_handler.py b/colossalai/auto_parallel/tensor_shard/deprecated/op_handler/where_handler.py
--- a/colossalai/auto_parallel/tensor_shard/deprecated/op_handler/where_handler.py
+++ b/colossalai/auto_parallel/tensor_shard/deprecated/op_handler/where_handler.py
@@ -6,10 +6,12 @@
import torch
-from colossalai.auto_parallel.tensor_shard.deprecated._utils import (enumerate_all_possible_1d_sharding,
- enumerate_all_possible_2d_sharding,
- ignore_sharding_exception)
-from colossalai.auto_parallel.tensor_shard.deprecated.sharding_strategy import (ShardingStrategy, StrategiesVector)
+from colossalai.auto_parallel.tensor_shard.deprecated._utils import (
+ enumerate_all_possible_1d_sharding,
+ enumerate_all_possible_2d_sharding,
+ ignore_sharding_exception,
+)
+from colossalai.auto_parallel.tensor_shard.deprecated.sharding_strategy import ShardingStrategy, StrategiesVector
from colossalai.tensor.shape_consistency import ShapeConsistencyManager
from colossalai.tensor.sharding_spec import ShardingSpec
| {"golden_diff": "diff --git a/colossalai/auto_parallel/tensor_shard/deprecated/op_handler/where_handler.py b/colossalai/auto_parallel/tensor_shard/deprecated/op_handler/where_handler.py\n--- a/colossalai/auto_parallel/tensor_shard/deprecated/op_handler/where_handler.py\n+++ b/colossalai/auto_parallel/tensor_shard/deprecated/op_handler/where_handler.py\n@@ -6,10 +6,12 @@\n \n import torch\n \n-from colossalai.auto_parallel.tensor_shard.deprecated._utils import (enumerate_all_possible_1d_sharding,\n- enumerate_all_possible_2d_sharding,\n- ignore_sharding_exception)\n-from colossalai.auto_parallel.tensor_shard.deprecated.sharding_strategy import (ShardingStrategy, StrategiesVector)\n+from colossalai.auto_parallel.tensor_shard.deprecated._utils import (\n+ enumerate_all_possible_1d_sharding,\n+ enumerate_all_possible_2d_sharding,\n+ ignore_sharding_exception,\n+)\n+from colossalai.auto_parallel.tensor_shard.deprecated.sharding_strategy import ShardingStrategy, StrategiesVector\n from colossalai.tensor.shape_consistency import ShapeConsistencyManager\n from colossalai.tensor.sharding_spec import ShardingSpec\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "import operator\nimport warnings\nfrom copy import deepcopy\nfrom functools import reduce\nfrom typing import Dict, List\n\nimport torch\n\nfrom colossalai.auto_parallel.tensor_shard.deprecated._utils import (enumerate_all_possible_1d_sharding,\n enumerate_all_possible_2d_sharding,\n ignore_sharding_exception)\nfrom colossalai.auto_parallel.tensor_shard.deprecated.sharding_strategy import (ShardingStrategy, StrategiesVector)\nfrom colossalai.tensor.shape_consistency import ShapeConsistencyManager\nfrom colossalai.tensor.sharding_spec import ShardingSpec\n\nfrom .operator_handler import OperatorHandler\n\n__all__ = ['WhereHandler']\n\n\nclass WhereHandler(OperatorHandler):\n \"\"\"\n An OperatorHandler which deals with the sharding strategies of torch.where.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n # TODO: x or y could be scalar\n super().__init__(*args, **kwargs)\n assert len(self.predecessor_node) == 3\n self.condition_data = self.predecessor_node[0]._meta_data\n self.x_data = self.predecessor_node[1]._meta_data\n self.y_data = self.predecessor_node[2]._meta_data\n self.condition = self.predecessor_node[0]\n self.x = self.predecessor_node[1]\n self.y = self.predecessor_node[2]\n self.output_data = self.node._meta_data\n\n def _generate_sharding_spec(self, input_: torch.Tensor, dim_partition_dict: Dict[int, List[int]]) -> ShardingSpec:\n shape = list(input_.shape)\n\n # padding the shape to the same length as output_data\n while len(shape) < self.output_data.dim():\n shape.insert(0, 1)\n shape = torch.Size(shape)\n\n # if the sharding happens on a size one dimension, we should record it as R.\n processed_dim_partition_dict = deepcopy(dim_partition_dict)\n for dim_index, _ in dim_partition_dict.items():\n if shape[dim_index] == 1:\n processed_dim_partition_dict.pop(dim_index)\n for dim_index, sharding_index_list in processed_dim_partition_dict.items():\n sharding_list = [self.device_mesh.mesh_shape[sharding_index] for sharding_index in sharding_index_list]\n sharding_size = reduce(operator.mul, sharding_list, 1)\n assert shape[\n dim_index] % sharding_size == 0, f'we cannot shard the {dim_index} dimension of tensor into {sharding_size} partitions.'\n sharding_spec = ShardingSpec(device_mesh=self.device_mesh,\n entire_shape=shape,\n 
dim_partition_dict=processed_dim_partition_dict)\n\n return sharding_spec\n\n def _generate_compute_cost(self, total_sharding_size):\n lhs_matrix_shape = self.lhs_data.shape[-2:]\n rhs_matrix_shape = self.rhs_data.shape[-2:]\n batch_dimensions_shape = self.output_data.shape[:-2]\n batch_dimensions_product = reduce(operator.mul, batch_dimensions_shape, 1)\n compute_cost = reduce(\n operator.mul, lhs_matrix_shape) * rhs_matrix_shape[0] * batch_dimensions_product * 2 / total_sharding_size\n return compute_cost\n\n def _generate_resharding_costs(self, sharding_specs):\n # The resharding_cost of weight is counted due to sharing weight cases.\n dtype = self.node._meta_data.dtype\n nodes = self.predecessor_node\n resharding_costs = {}\n size_per_elem_bytes = torch.tensor([], dtype=dtype).element_size()\n\n # shape consistency manager is a singleton class\n shape_consistency_manager = ShapeConsistencyManager()\n\n for input_node, input_spec in zip(nodes, sharding_specs):\n resharding_costs[input_node] = []\n for strategy in input_node.strategies_vector:\n input_sharding_spec = strategy.output_sharding_spec\n assert isinstance(input_sharding_spec, ShardingSpec), f'The input node should NOT be a tuple of tensor.'\n # if the input shape is smaller than the target input, we will fill the input to the same length as target.\n # Then, use the padded input sharding spec to compute the resharding cost.\n if len(input_sharding_spec.entire_shape) < len(input_spec.entire_shape):\n new_entire_shape = list(input_sharding_spec.entire_shape)\n while len(new_entire_shape) < len(input_spec.entire_shape):\n new_entire_shape.insert(0, 1)\n new_entire_shape = torch.Size(new_entire_shape)\n new_device_mesh = input_sharding_spec.device_mesh\n new_dim_partition_dict = input_sharding_spec.dim_partition_dict\n input_sharding_spec = ShardingSpec(device_mesh=new_device_mesh,\n entire_shape=new_entire_shape,\n dim_partition_dict=new_dim_partition_dict)\n\n # compute the resharding cost\n _, _, total_resharding_cost = shape_consistency_manager.shape_consistency(\n input_sharding_spec, input_spec)\n total_resharding_cost = total_resharding_cost['total']\n # we need multiply the size of elem dtype to get correct communication cost\n resharding_cost = total_resharding_cost * size_per_elem_bytes\n resharding_costs[input_node].append(resharding_cost)\n\n return resharding_costs\n\n def _convert_partition_dict_to_sharding_spec(self, dim_partition_list):\n\n sharding_spec_list = []\n check_duplicated_list = []\n for output_dim_partition_dict in dim_partition_list:\n try:\n output_sharding_spec = self._generate_sharding_spec(self.output_data, output_dim_partition_dict)\n except AssertionError as e:\n warnings.warn(f'{e}')\n break\n sharding_seq = output_sharding_spec.sharding_sequence\n if sharding_seq not in check_duplicated_list:\n check_duplicated_list.append(sharding_seq)\n sharding_spec_list.append(output_sharding_spec)\n\n return sharding_spec_list\n\n def _enumerate_all_possible_output(self, mesh_dim_0, mesh_dim_1):\n # use mesh_dim_0, mesh_dim_1 instead of constant 0, 1 in here for N-D device mesh scaliablity.\n\n output_dim_partition_list = []\n dim_size = self.output_data.dim()\n # enumerate all the 2D sharding cases\n sharding_list_2d = enumerate_all_possible_2d_sharding(mesh_dim_0, mesh_dim_1, dim_size)\n output_dim_partition_list.extend(sharding_list_2d)\n\n # enumerate all the 1D sharding cases\n sharding_list_1d_on_dim_0 = enumerate_all_possible_1d_sharding(mesh_dim_0, dim_size)\n 
output_dim_partition_list.extend(sharding_list_1d_on_dim_0)\n sharding_list_1d_on_dim_1 = enumerate_all_possible_1d_sharding(mesh_dim_1, dim_size)\n output_dim_partition_list.extend(sharding_list_1d_on_dim_1)\n\n # add empty dict for fully replicated case\n output_dim_partition_list.append({})\n output_sharding_spec_list = self._convert_partition_dict_to_sharding_spec(output_dim_partition_list)\n\n return output_sharding_spec_list\n\n @ignore_sharding_exception\n def _register_strategy(self, output_sharding_spec):\n dim_partition_dict_for_input = output_sharding_spec.dim_partition_dict\n sharding_spec_for_condition = self._generate_sharding_spec(self.condition_data, dim_partition_dict_for_input)\n sharding_spec_for_x = self._generate_sharding_spec(self.x_data, dim_partition_dict_for_input)\n sharding_spec_for_y = self._generate_sharding_spec(self.y_data, dim_partition_dict_for_input)\n\n name = f'{output_sharding_spec.sharding_sequence} = {sharding_spec_for_condition.sharding_sequence} x {sharding_spec_for_x.sharding_sequence} x {sharding_spec_for_y.sharding_sequence}'\n dim_partition_dict_for_output = output_sharding_spec.dim_partition_dict\n\n # generate resharding cost for this strategy\n resharding_costs = self._generate_resharding_costs(\n [sharding_spec_for_condition, sharding_spec_for_x, sharding_spec_for_y])\n\n # compute the computation cost of this strategy\n sharding_dims = []\n for mesh_dims in dim_partition_dict_for_output.values():\n for mesh_dim in mesh_dims:\n sharding_dims.append(self.device_mesh.shape[mesh_dim])\n sharding_size = reduce(operator.mul, sharding_dims, 1)\n memory_cost = self.output_data.numel() / sharding_size\n compute_cost = memory_cost\n communication_cost = 0\n\n sharding_strategies = ShardingStrategy(name,\n output_sharding_spec=output_sharding_spec,\n compute_cost=compute_cost,\n communication_cost=communication_cost,\n memory_cost=memory_cost,\n resharding_costs=resharding_costs,\n input_shardings=(sharding_spec_for_condition, sharding_spec_for_x,\n sharding_spec_for_y))\n\n self.strategies_vector.append(sharding_strategies)\n\n def register_strategy(self) -> StrategiesVector:\n MESH_DIM_LIST = [0, 1]\n output_sharding_specs = self._enumerate_all_possible_output(MESH_DIM_LIST[0], MESH_DIM_LIST[1])\n for output_sharding_spec in output_sharding_specs:\n self._register_strategy(output_sharding_spec)\n", "path": "colossalai/auto_parallel/tensor_shard/deprecated/op_handler/where_handler.py"}]} | 3,049 | 256 |
gh_patches_debug_24354 | rasdani/github-patches | git_diff | pyca__cryptography-4261 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Drop Wheezy Builder?
Wheezy is EOL as of May 31, 2018 (https://wiki.debian.org/LTS). Should we drop it from our test matrix going forward? We still have OpenSSL 1.0.1 coverage through building on Ubuntu 14.04 (supported through April 2019).
</issue>
<code>
[start of src/cryptography/utils.py]
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 import abc
8 import binascii
9 import inspect
10 import sys
11 import warnings
12
13
14 # We use a UserWarning subclass, instead of DeprecationWarning, because CPython
15 # decided deprecation warnings should be invisible by default.
16 class CryptographyDeprecationWarning(UserWarning):
17 pass
18
19
20 # Several APIs were deprecated with no specific end-of-life date because of the
21 # ubiquity of their use. They should not be removed until we agree on when that
22 # cycle ends.
23 PersistentlyDeprecated = CryptographyDeprecationWarning
24 DeprecatedIn21 = CryptographyDeprecationWarning
25
26
27 def _check_bytes(name, value):
28 if not isinstance(value, bytes):
29 raise TypeError("{0} must be bytes".format(name))
30
31
32 def read_only_property(name):
33 return property(lambda self: getattr(self, name))
34
35
36 def register_interface(iface):
37 def register_decorator(klass):
38 verify_interface(iface, klass)
39 iface.register(klass)
40 return klass
41 return register_decorator
42
43
44 def register_interface_if(predicate, iface):
45 def register_decorator(klass):
46 if predicate:
47 verify_interface(iface, klass)
48 iface.register(klass)
49 return klass
50 return register_decorator
51
52
53 if hasattr(int, "from_bytes"):
54 int_from_bytes = int.from_bytes
55 else:
56 def int_from_bytes(data, byteorder, signed=False):
57 assert byteorder == 'big'
58 assert not signed
59
60 return int(binascii.hexlify(data), 16)
61
62
63 if hasattr(int, "to_bytes"):
64 def int_to_bytes(integer, length=None):
65 return integer.to_bytes(
66 length or (integer.bit_length() + 7) // 8 or 1, 'big'
67 )
68 else:
69 def int_to_bytes(integer, length=None):
70 hex_string = '%x' % integer
71 if length is None:
72 n = len(hex_string)
73 else:
74 n = length * 2
75 return binascii.unhexlify(hex_string.zfill(n + (n & 1)))
76
77
78 class InterfaceNotImplemented(Exception):
79 pass
80
81
82 if hasattr(inspect, "signature"):
83 signature = inspect.signature
84 else:
85 signature = inspect.getargspec
86
87
88 def verify_interface(iface, klass):
89 for method in iface.__abstractmethods__:
90 if not hasattr(klass, method):
91 raise InterfaceNotImplemented(
92 "{0} is missing a {1!r} method".format(klass, method)
93 )
94 if isinstance(getattr(iface, method), abc.abstractproperty):
95 # Can't properly verify these yet.
96 continue
97 sig = signature(getattr(iface, method))
98 actual = signature(getattr(klass, method))
99 if sig != actual:
100 raise InterfaceNotImplemented(
101 "{0}.{1}'s signature differs from the expected. Expected: "
102 "{2!r}. Received: {3!r}".format(
103 klass, method, sig, actual
104 )
105 )
106
107
108 # No longer needed as of 2.2, but retained because we have external consumers
109 # who use it.
110 def bit_length(x):
111 return x.bit_length()
112
113
114 class _DeprecatedValue(object):
115 def __init__(self, value, message, warning_class):
116 self.value = value
117 self.message = message
118 self.warning_class = warning_class
119
120
121 class _ModuleWithDeprecations(object):
122 def __init__(self, module):
123 self.__dict__["_module"] = module
124
125 def __getattr__(self, attr):
126 obj = getattr(self._module, attr)
127 if isinstance(obj, _DeprecatedValue):
128 warnings.warn(obj.message, obj.warning_class, stacklevel=2)
129 obj = obj.value
130 return obj
131
132 def __setattr__(self, attr, value):
133 setattr(self._module, attr, value)
134
135 def __delattr__(self, attr):
136 obj = getattr(self._module, attr)
137 if isinstance(obj, _DeprecatedValue):
138 warnings.warn(obj.message, obj.warning_class, stacklevel=2)
139
140 delattr(self._module, attr)
141
142 def __dir__(self):
143 return ["_module"] + dir(self._module)
144
145
146 def deprecated(value, module_name, message, warning_class):
147 module = sys.modules[module_name]
148 if not isinstance(module, _ModuleWithDeprecations):
149 sys.modules[module_name] = _ModuleWithDeprecations(module)
150 return _DeprecatedValue(value, message, warning_class)
151
152
153 def cached_property(func):
154 cached_name = "_cached_{0}".format(func)
155 sentinel = object()
156
157 def inner(instance):
158 cache = getattr(instance, cached_name, sentinel)
159 if cache is not sentinel:
160 return cache
161 result = func(instance)
162 setattr(instance, cached_name, result)
163 return result
164 return property(inner)
165
[end of src/cryptography/utils.py]
[start of src/cryptography/hazmat/primitives/constant_time.py]
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 import hmac
8
9 from cryptography.hazmat.bindings._constant_time import lib
10
11
12 if hasattr(hmac, "compare_digest"):
13 def bytes_eq(a, b):
14 if not isinstance(a, bytes) or not isinstance(b, bytes):
15 raise TypeError("a and b must be bytes.")
16
17 return hmac.compare_digest(a, b)
18
19 else:
20 def bytes_eq(a, b):
21 if not isinstance(a, bytes) or not isinstance(b, bytes):
22 raise TypeError("a and b must be bytes.")
23
24 return lib.Cryptography_constant_time_bytes_eq(
25 a, len(a), b, len(b)
26 ) == 1
27
[end of src/cryptography/hazmat/primitives/constant_time.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cryptography/hazmat/primitives/constant_time.py b/src/cryptography/hazmat/primitives/constant_time.py
--- a/src/cryptography/hazmat/primitives/constant_time.py
+++ b/src/cryptography/hazmat/primitives/constant_time.py
@@ -5,7 +5,9 @@
from __future__ import absolute_import, division, print_function
import hmac
+import warnings
+from cryptography import utils
from cryptography.hazmat.bindings._constant_time import lib
@@ -17,6 +19,13 @@
return hmac.compare_digest(a, b)
else:
+ warnings.warn(
+ "Support for your Python version is deprecated. The next version of "
+ "cryptography will remove support. Please upgrade to a 2.7.x "
+ "release that supports hmac.compare_digest as soon as possible.",
+ utils.DeprecatedIn23,
+ )
+
def bytes_eq(a, b):
if not isinstance(a, bytes) or not isinstance(b, bytes):
raise TypeError("a and b must be bytes.")
diff --git a/src/cryptography/utils.py b/src/cryptography/utils.py
--- a/src/cryptography/utils.py
+++ b/src/cryptography/utils.py
@@ -22,6 +22,7 @@
# cycle ends.
PersistentlyDeprecated = CryptographyDeprecationWarning
DeprecatedIn21 = CryptographyDeprecationWarning
+DeprecatedIn23 = CryptographyDeprecationWarning
def _check_bytes(name, value):
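A rough way to observe the warning added by this diff (illustrative only: it fires only on an interpreter whose `hmac` lacks `compare_digest`, and only on the first import of the module):

```python
import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    import cryptography.hazmat.primitives.constant_time  # noqa: F401

# On such an interpreter, `caught` should contain one DeprecatedIn23 /
# CryptographyDeprecationWarning mentioning hmac.compare_digest.
print([str(w.message) for w in caught])
```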
| {"golden_diff": "diff --git a/src/cryptography/hazmat/primitives/constant_time.py b/src/cryptography/hazmat/primitives/constant_time.py\n--- a/src/cryptography/hazmat/primitives/constant_time.py\n+++ b/src/cryptography/hazmat/primitives/constant_time.py\n@@ -5,7 +5,9 @@\n from __future__ import absolute_import, division, print_function\n \n import hmac\n+import warnings\n \n+from cryptography import utils\n from cryptography.hazmat.bindings._constant_time import lib\n \n \n@@ -17,6 +19,13 @@\n return hmac.compare_digest(a, b)\n \n else:\n+ warnings.warn(\n+ \"Support for your Python version is deprecated. The next version of \"\n+ \"cryptography will remove support. Please upgrade to a 2.7.x \"\n+ \"release that supports hmac.compare_digest as soon as possible.\",\n+ utils.DeprecatedIn23,\n+ )\n+\n def bytes_eq(a, b):\n if not isinstance(a, bytes) or not isinstance(b, bytes):\n raise TypeError(\"a and b must be bytes.\")\ndiff --git a/src/cryptography/utils.py b/src/cryptography/utils.py\n--- a/src/cryptography/utils.py\n+++ b/src/cryptography/utils.py\n@@ -22,6 +22,7 @@\n # cycle ends.\n PersistentlyDeprecated = CryptographyDeprecationWarning\n DeprecatedIn21 = CryptographyDeprecationWarning\n+DeprecatedIn23 = CryptographyDeprecationWarning\n \n \n def _check_bytes(name, value):\n", "issue": "Drop Wheezy Builder?\nWheezy is EOL as of May 31, 2018 (https://wiki.debian.org/LTS). Should we drop it out of our test matrix going forward? We still have OpenSSL 1.0.1 coverage through building on Ubuntu 14.04 (supported through April 2019).\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport abc\nimport binascii\nimport inspect\nimport sys\nimport warnings\n\n\n# We use a UserWarning subclass, instead of DeprecationWarning, because CPython\n# decided deprecation warnings should be invisble by default.\nclass CryptographyDeprecationWarning(UserWarning):\n pass\n\n\n# Several APIs were deprecated with no specific end-of-life date because of the\n# ubiquity of their use. 
They should not be removed until we agree on when that\n# cycle ends.\nPersistentlyDeprecated = CryptographyDeprecationWarning\nDeprecatedIn21 = CryptographyDeprecationWarning\n\n\ndef _check_bytes(name, value):\n if not isinstance(value, bytes):\n raise TypeError(\"{0} must be bytes\".format(name))\n\n\ndef read_only_property(name):\n return property(lambda self: getattr(self, name))\n\n\ndef register_interface(iface):\n def register_decorator(klass):\n verify_interface(iface, klass)\n iface.register(klass)\n return klass\n return register_decorator\n\n\ndef register_interface_if(predicate, iface):\n def register_decorator(klass):\n if predicate:\n verify_interface(iface, klass)\n iface.register(klass)\n return klass\n return register_decorator\n\n\nif hasattr(int, \"from_bytes\"):\n int_from_bytes = int.from_bytes\nelse:\n def int_from_bytes(data, byteorder, signed=False):\n assert byteorder == 'big'\n assert not signed\n\n return int(binascii.hexlify(data), 16)\n\n\nif hasattr(int, \"to_bytes\"):\n def int_to_bytes(integer, length=None):\n return integer.to_bytes(\n length or (integer.bit_length() + 7) // 8 or 1, 'big'\n )\nelse:\n def int_to_bytes(integer, length=None):\n hex_string = '%x' % integer\n if length is None:\n n = len(hex_string)\n else:\n n = length * 2\n return binascii.unhexlify(hex_string.zfill(n + (n & 1)))\n\n\nclass InterfaceNotImplemented(Exception):\n pass\n\n\nif hasattr(inspect, \"signature\"):\n signature = inspect.signature\nelse:\n signature = inspect.getargspec\n\n\ndef verify_interface(iface, klass):\n for method in iface.__abstractmethods__:\n if not hasattr(klass, method):\n raise InterfaceNotImplemented(\n \"{0} is missing a {1!r} method\".format(klass, method)\n )\n if isinstance(getattr(iface, method), abc.abstractproperty):\n # Can't properly verify these yet.\n continue\n sig = signature(getattr(iface, method))\n actual = signature(getattr(klass, method))\n if sig != actual:\n raise InterfaceNotImplemented(\n \"{0}.{1}'s signature differs from the expected. Expected: \"\n \"{2!r}. 
Received: {3!r}\".format(\n klass, method, sig, actual\n )\n )\n\n\n# No longer needed as of 2.2, but retained because we have external consumers\n# who use it.\ndef bit_length(x):\n return x.bit_length()\n\n\nclass _DeprecatedValue(object):\n def __init__(self, value, message, warning_class):\n self.value = value\n self.message = message\n self.warning_class = warning_class\n\n\nclass _ModuleWithDeprecations(object):\n def __init__(self, module):\n self.__dict__[\"_module\"] = module\n\n def __getattr__(self, attr):\n obj = getattr(self._module, attr)\n if isinstance(obj, _DeprecatedValue):\n warnings.warn(obj.message, obj.warning_class, stacklevel=2)\n obj = obj.value\n return obj\n\n def __setattr__(self, attr, value):\n setattr(self._module, attr, value)\n\n def __delattr__(self, attr):\n obj = getattr(self._module, attr)\n if isinstance(obj, _DeprecatedValue):\n warnings.warn(obj.message, obj.warning_class, stacklevel=2)\n\n delattr(self._module, attr)\n\n def __dir__(self):\n return [\"_module\"] + dir(self._module)\n\n\ndef deprecated(value, module_name, message, warning_class):\n module = sys.modules[module_name]\n if not isinstance(module, _ModuleWithDeprecations):\n sys.modules[module_name] = _ModuleWithDeprecations(module)\n return _DeprecatedValue(value, message, warning_class)\n\n\ndef cached_property(func):\n cached_name = \"_cached_{0}\".format(func)\n sentinel = object()\n\n def inner(instance):\n cache = getattr(instance, cached_name, sentinel)\n if cache is not sentinel:\n return cache\n result = func(instance)\n setattr(instance, cached_name, result)\n return result\n return property(inner)\n", "path": "src/cryptography/utils.py"}, {"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport hmac\n\nfrom cryptography.hazmat.bindings._constant_time import lib\n\n\nif hasattr(hmac, \"compare_digest\"):\n def bytes_eq(a, b):\n if not isinstance(a, bytes) or not isinstance(b, bytes):\n raise TypeError(\"a and b must be bytes.\")\n\n return hmac.compare_digest(a, b)\n\nelse:\n def bytes_eq(a, b):\n if not isinstance(a, bytes) or not isinstance(b, bytes):\n raise TypeError(\"a and b must be bytes.\")\n\n return lib.Cryptography_constant_time_bytes_eq(\n a, len(a), b, len(b)\n ) == 1\n", "path": "src/cryptography/hazmat/primitives/constant_time.py"}]} | 2,352 | 329 |
gh_patches_debug_9590 | rasdani/github-patches | git_diff | HypothesisWorks__hypothesis-2965 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug?] `.filter` ignored on `just` strategy
Hi,
I hope this has not been reported before. I checked the issues and read the [changelog] ~but could not find anything related~ and this seems to be related to issue https://github.com/HypothesisWorks/hypothesis/issues/2036.
[changelog]: https://hypothesis.readthedocs.io/en/latest/changes.html
I noticed an unexpected difference in the behavior when `.filter` is applied on `just` strategy between Hypothesis 5.43.3 and Hypothesis 6.12.0 (the former ran on my machine, while the remote CI machine had 6.12.0 installed).
With Hypothesis 5.43.3 (expected behavior):
```python
import hypothesis.strategies
import hypothesis.strategies._internal.strategies
strategy = hypothesis.strategies.just(1).filter(lambda x: x > 10)
assert str(strategy) == "just(1).filter(lambda x: x > 10)"
assert type(strategy) == hypothesis.strategies._internal.strategies.FilteredStrategy
```
With Hypothesis 6.12.0 (unexpected behavior):
```python
import hypothesis.strategies
import hypothesis.strategies._internal.misc
strategy = hypothesis.strategies.just(1).filter(lambda x: x > 10)
assert str(strategy) == "just(1)"
assert type(strategy) == hypothesis.strategies._internal.misc.JustStrategy
```
This bug (?) is relevant for [icontract-hypothesis] when we test instance methods automatically where pre-conditions need to be applied on `self`. I'd expect the health check to be raised rather than the pre-conditions to be silently ignored.
[icontract-hypothesis]: https://github.com/mristin/icontract-hypothesis
</issue>
<code>
[start of hypothesis-python/src/hypothesis/strategies/_internal/misc.py]
1 # This file is part of Hypothesis, which may be found at
2 # https://github.com/HypothesisWorks/hypothesis/
3 #
4 # Most of this work is copyright (C) 2013-2021 David R. MacIver
5 # ([email protected]), but it contains contributions by others. See
6 # CONTRIBUTING.rst for a full list of people who may hold copyright, and
7 # consult the git log if you need to determine who owns an individual
8 # contribution.
9 #
10 # This Source Code Form is subject to the terms of the Mozilla Public License,
11 # v. 2.0. If a copy of the MPL was not distributed with this file, You can
12 # obtain one at https://mozilla.org/MPL/2.0/.
13 #
14 # END HEADER
15
16 from hypothesis.internal.reflection import get_pretty_function_description
17 from hypothesis.strategies._internal.strategies import (
18 FilteredStrategy,
19 SampledFromStrategy,
20 SearchStrategy,
21 T,
22 filter_not_satisfied,
23 is_simple_data,
24 )
25 from hypothesis.strategies._internal.utils import cacheable, defines_strategy
26
27
28 class JustStrategy(SampledFromStrategy):
29 """A strategy which always returns a single fixed value.
30
31 It's implemented as a length-one SampledFromStrategy so that all our
32 special-case logic for filtering and sets applies also to just(x).
33
34 The important difference from a SampledFromStrategy with only one
35 element to choose is that JustStrategy *never* touches the underlying
36 choice sequence, i.e. drawing neither reads from nor writes to `data`.
37 This is a reasonably important optimisation (or semantic distinction!)
38 for both JustStrategy and SampledFromStrategy.
39 """
40
41 @property
42 def value(self):
43 return self.elements[0]
44
45 def __repr__(self):
46 if self.value is None:
47 return "none()"
48 return f"just({get_pretty_function_description(self.value)})"
49
50 def calc_has_reusable_values(self, recur):
51 return True
52
53 def calc_is_cacheable(self, recur):
54 return is_simple_data(self.value)
55
56 def do_draw(self, data):
57 result = self._transform(self.value)
58 if result is filter_not_satisfied:
59 data.note_event(f"Aborted test because unable to satisfy {self!r}")
60 data.mark_invalid()
61 return result
62
63 def do_filtered_draw(self, data, filter_strategy):
64 if isinstance(filter_strategy, FilteredStrategy):
65 return self._transform(self.value, filter_strategy.flat_conditions)
66 return self._transform(self.value)
67
68
69 def just(value: T) -> SearchStrategy[T]:
70 """Return a strategy which only generates ``value``.
71
72 Note: ``value`` is not copied. Be wary of using mutable values.
73
74 If ``value`` is the result of a callable, you can use
75 :func:`builds(callable) <hypothesis.strategies.builds>` instead
76 of ``just(callable())`` to get a fresh value each time.
77
78 Examples from this strategy do not shrink (because there is only one).
79 """
80 return JustStrategy([value])
81
82
83 @defines_strategy(force_reusable_values=True)
84 def none() -> SearchStrategy[None]:
85 """Return a strategy which only generates None.
86
87 Examples from this strategy do not shrink (because there is only
88 one).
89 """
90 return just(None)
91
92
93 class Nothing(SearchStrategy):
94 def calc_is_empty(self, recur):
95 return True
96
97 def do_draw(self, data):
98 # This method should never be called because draw() will mark the
99 # data as invalid immediately because is_empty is True.
100 raise NotImplementedError("This should never happen")
101
102 def calc_has_reusable_values(self, recur):
103 return True
104
105 def __repr__(self):
106 return "nothing()"
107
108 def map(self, f):
109 return self
110
111 def filter(self, f):
112 return self
113
114 def flatmap(self, f):
115 return self
116
117
118 NOTHING = Nothing()
119
120
121 @cacheable
122 def nothing() -> SearchStrategy:
123 """This strategy never successfully draws a value and will always reject on
124 an attempt to draw.
125
126 Examples from this strategy do not shrink (because there are none).
127 """
128 return NOTHING
129
[end of hypothesis-python/src/hypothesis/strategies/_internal/misc.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/hypothesis-python/src/hypothesis/strategies/_internal/misc.py b/hypothesis-python/src/hypothesis/strategies/_internal/misc.py
--- a/hypothesis-python/src/hypothesis/strategies/_internal/misc.py
+++ b/hypothesis-python/src/hypothesis/strategies/_internal/misc.py
@@ -43,9 +43,13 @@
return self.elements[0]
def __repr__(self):
+ suffix = "".join(
+ f".{name}({get_pretty_function_description(f)})"
+ for name, f in self._transformations
+ )
if self.value is None:
- return "none()"
- return f"just({get_pretty_function_description(self.value)})"
+ return "none()" + suffix
+ return f"just({get_pretty_function_description(self.value)}){suffix}"
def calc_has_reusable_values(self, recur):
return True
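With the `__repr__` suffix above applied, the second snippet from the issue should again report the filter (expected output shown as a comment; this is an illustrative expectation, not a test from the repository):

```python
import hypothesis.strategies as st

strategy = st.just(1).filter(lambda x: x > 10)
print(repr(strategy))  # just(1).filter(lambda x: x > 10)
```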
| {"golden_diff": "diff --git a/hypothesis-python/src/hypothesis/strategies/_internal/misc.py b/hypothesis-python/src/hypothesis/strategies/_internal/misc.py\n--- a/hypothesis-python/src/hypothesis/strategies/_internal/misc.py\n+++ b/hypothesis-python/src/hypothesis/strategies/_internal/misc.py\n@@ -43,9 +43,13 @@\n return self.elements[0]\n \n def __repr__(self):\n+ suffix = \"\".join(\n+ f\".{name}({get_pretty_function_description(f)})\"\n+ for name, f in self._transformations\n+ )\n if self.value is None:\n- return \"none()\"\n- return f\"just({get_pretty_function_description(self.value)})\"\n+ return \"none()\" + suffix\n+ return f\"just({get_pretty_function_description(self.value)}){suffix}\"\n \n def calc_has_reusable_values(self, recur):\n return True\n", "issue": "[Bug?] `.filter` ignored on `just` strategy\nHi,\r\nI hope this has not been reported before. I checked the issues and read the [changelog] ~but could not find anything related~ and this seems to be related to issue https://github.com/HypothesisWorks/hypothesis/issues/2036.\r\n\r\n[changelog]: https://hypothesis.readthedocs.io/en/latest/changes.html\r\n\r\nI noticed an unexpected difference in the behavior when `.filter` is applied on `just` strategy between Hypothesis 5.43.3 and Hypothesis 6.12.0 (the former ran on my machine, while the remote CI machine had 6.12.0 installed).\r\n\r\nWith Hypothesis 5.43.3 (expected behavior):\r\n```python\r\nimport hypothesis.strategies\r\nimport hypothesis.strategies._internal.strategies\r\n\r\nstrategy = hypothesis.strategies.just(1).filter(lambda x: x > 10)\r\nassert str(strategy) == \"just(1).filter(lambda x: x > 10)\"\r\nassert type(strategy) == hypothesis.strategies._internal.strategies.FilteredStrategy\r\n```\r\n\r\nWith Hypothesis 6.12.0 (unexpected behavior):\r\n\r\n```python\r\nimport hypothesis.strategies\r\nimport hypothesis.strategies._internal.misc\r\n\r\nstrategy = hypothesis.strategies.just(1).filter(lambda x: x > 10)\r\nassert str(strategy) == \"just(1)\"\r\nassert type(strategy) == hypothesis.strategies._internal.misc.JustStrategy\r\n```\r\n\r\nThis bug (?) is relevant for [icontract-hypothesis] when we test instance methods automatically where pre-conditions need to be applied on `self`. I'd expect the health check to be raised rather than the pre-conditions to be silently ignored.\r\n\r\n[icontract-hypothesis]: https://github.com/mristin/icontract-hypothesis\n", "before_files": [{"content": "# This file is part of Hypothesis, which may be found at\n# https://github.com/HypothesisWorks/hypothesis/\n#\n# Most of this work is copyright (C) 2013-2021 David R. MacIver\n# ([email protected]), but it contains contributions by others. See\n# CONTRIBUTING.rst for a full list of people who may hold copyright, and\n# consult the git log if you need to determine who owns an individual\n# contribution.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public License,\n# v. 2.0. 
If a copy of the MPL was not distributed with this file, You can\n# obtain one at https://mozilla.org/MPL/2.0/.\n#\n# END HEADER\n\nfrom hypothesis.internal.reflection import get_pretty_function_description\nfrom hypothesis.strategies._internal.strategies import (\n FilteredStrategy,\n SampledFromStrategy,\n SearchStrategy,\n T,\n filter_not_satisfied,\n is_simple_data,\n)\nfrom hypothesis.strategies._internal.utils import cacheable, defines_strategy\n\n\nclass JustStrategy(SampledFromStrategy):\n \"\"\"A strategy which always returns a single fixed value.\n\n It's implemented as a length-one SampledFromStrategy so that all our\n special-case logic for filtering and sets applies also to just(x).\n\n The important difference from a SampledFromStrategy with only one\n element to choose is that JustStrategy *never* touches the underlying\n choice sequence, i.e. drawing neither reads from nor writes to `data`.\n This is a reasonably important optimisation (or semantic distinction!)\n for both JustStrategy and SampledFromStrategy.\n \"\"\"\n\n @property\n def value(self):\n return self.elements[0]\n\n def __repr__(self):\n if self.value is None:\n return \"none()\"\n return f\"just({get_pretty_function_description(self.value)})\"\n\n def calc_has_reusable_values(self, recur):\n return True\n\n def calc_is_cacheable(self, recur):\n return is_simple_data(self.value)\n\n def do_draw(self, data):\n result = self._transform(self.value)\n if result is filter_not_satisfied:\n data.note_event(f\"Aborted test because unable to satisfy {self!r}\")\n data.mark_invalid()\n return result\n\n def do_filtered_draw(self, data, filter_strategy):\n if isinstance(filter_strategy, FilteredStrategy):\n return self._transform(self.value, filter_strategy.flat_conditions)\n return self._transform(self.value)\n\n\ndef just(value: T) -> SearchStrategy[T]:\n \"\"\"Return a strategy which only generates ``value``.\n\n Note: ``value`` is not copied. Be wary of using mutable values.\n\n If ``value`` is the result of a callable, you can use\n :func:`builds(callable) <hypothesis.strategies.builds>` instead\n of ``just(callable())`` to get a fresh value each time.\n\n Examples from this strategy do not shrink (because there is only one).\n \"\"\"\n return JustStrategy([value])\n\n\n@defines_strategy(force_reusable_values=True)\ndef none() -> SearchStrategy[None]:\n \"\"\"Return a strategy which only generates None.\n\n Examples from this strategy do not shrink (because there is only\n one).\n \"\"\"\n return just(None)\n\n\nclass Nothing(SearchStrategy):\n def calc_is_empty(self, recur):\n return True\n\n def do_draw(self, data):\n # This method should never be called because draw() will mark the\n # data as invalid immediately because is_empty is True.\n raise NotImplementedError(\"This should never happen\")\n\n def calc_has_reusable_values(self, recur):\n return True\n\n def __repr__(self):\n return \"nothing()\"\n\n def map(self, f):\n return self\n\n def filter(self, f):\n return self\n\n def flatmap(self, f):\n return self\n\n\nNOTHING = Nothing()\n\n\n@cacheable\ndef nothing() -> SearchStrategy:\n \"\"\"This strategy never successfully draws a value and will always reject on\n an attempt to draw.\n\n Examples from this strategy do not shrink (because there are none).\n \"\"\"\n return NOTHING\n", "path": "hypothesis-python/src/hypothesis/strategies/_internal/misc.py"}]} | 2,140 | 210 |
gh_patches_debug_20630 | rasdani/github-patches | git_diff | getnikola__nikola-980 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`nikola -V/--version` doesn’t work outside of a site
```
(nikola-py2)[kwpolska@kw-cassandra ~WORKON_HOME]% nikola --version
[2014-01-04T17:40:42Z] ERROR: Nikola: This command needs to run inside an existing Nikola site.
(nikola-py2)[kwpolska@kw-cassandra ~WORKON_HOME]% cd ~/nikola
(nikola-py2)[kwpolska@kw-cassandra nikola Gmaster]% nikola --version
Nikola version 6.2.1
```
</issue>
<code>
[start of nikola/main.py]
1 # -*- coding: utf-8 -*-
2
3 # Copyright © 2012-2014 Roberto Alsina and others.
4
5 # Permission is hereby granted, free of charge, to any
6 # person obtaining a copy of this software and associated
7 # documentation files (the "Software"), to deal in the
8 # Software without restriction, including without limitation
9 # the rights to use, copy, modify, merge, publish,
10 # distribute, sublicense, and/or sell copies of the
11 # Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice
15 # shall be included in all copies or substantial portions of
16 # the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 from __future__ import print_function, unicode_literals
28 from operator import attrgetter
29 import os
30 import shutil
31 import sys
32 import traceback
33
34 from doit.loader import generate_tasks
35 from doit.cmd_base import TaskLoader
36 from doit.reporter import ExecutedOnlyReporter
37 from doit.doit_cmd import DoitMain
38 from doit.cmd_help import Help as DoitHelp
39 from doit.cmd_run import Run as DoitRun
40 from doit.cmd_clean import Clean as DoitClean
41 from doit.cmd_auto import Auto as DoitAuto
42 from logbook import NullHandler
43
44 from . import __version__
45 from .nikola import Nikola
46 from .utils import _reload, sys_decode, get_root_dir, LOGGER, STRICT_HANDLER
47
48
49 config = {}
50
51
52 def main(args):
53 quiet = False
54 if len(args) > 0 and args[0] == 'build' and '--strict' in args:
55 LOGGER.notice('Running in strict mode')
56 STRICT_HANDLER.push_application()
57 if len(args) > 0 and args[0] == 'build' and '-q' in args or '--quiet' in args:
58 nullhandler = NullHandler()
59 nullhandler.push_application()
60 quiet = True
61 global config
62
63 root = get_root_dir()
64 if root:
65 os.chdir(root)
66
67 sys.path.append('')
68 try:
69 import conf
70 _reload(conf)
71 config = conf.__dict__
72 except Exception:
73 if os.path.exists('conf.py'):
74 msg = traceback.format_exc(0).splitlines()[1]
75 LOGGER.error('In conf.py line {0}: {1}'.format(sys.exc_info()[2].tb_lineno, msg))
76 sys.exit(1)
77 config = {}
78
79 site = Nikola(**config)
80 return DoitNikola(site, quiet).run(args)
81
82
83 class Help(DoitHelp):
84 """show Nikola usage instead of doit """
85
86 @staticmethod
87 def print_usage(cmds):
88 """print nikola "usage" (basic help) instructions"""
89 print("Nikola is a tool to create static websites and blogs. For full documentation and more information, please visit http://getnikola.com\n\n")
90 print("Available commands:")
91 for cmd in sorted(cmds.values(), key=attrgetter('name')):
92 print(" nikola %-*s %s" % (20, cmd.name, cmd.doc_purpose))
93 print("")
94 print(" nikola help show help / reference")
95 print(" nikola help <command> show command usage")
96 print(" nikola help <task-name> show task usage")
97
98
99 class Build(DoitRun):
100 """expose "run" command as "build" for backward compatibility"""
101 def __init__(self, *args, **kw):
102 opts = list(self.cmd_options)
103 opts.append(
104 {
105 'name': 'strict',
106 'long': 'strict',
107 'default': False,
108 'type': bool,
109 'help': "Fail on things that would normally be warnings.",
110 }
111 )
112 opts.append(
113 {
114 'name': 'quiet',
115 'long': 'quiet',
116 'short': 'q',
117 'default': False,
118 'type': bool,
119 'help': "Run quietly.",
120 }
121 )
122 self.cmd_options = tuple(opts)
123 super(Build, self).__init__(*args, **kw)
124
125
126 class Clean(DoitClean):
127 """A clean that removes cache/"""
128
129 def clean_tasks(self, tasks, dryrun):
130 if not dryrun and config:
131 cache_folder = config.get('CACHE_FOLDER', 'cache')
132 if os.path.exists(cache_folder):
133 shutil.rmtree(cache_folder)
134 return super(Clean, self).clean_tasks(tasks, dryrun)
135
136 # Nikola has its own "auto" commands that uses livereload.
137 # Expose original doit "auto" command as "doit_auto".
138 DoitAuto.name = 'doit_auto'
139
140
141 class NikolaTaskLoader(TaskLoader):
142 """custom task loader to get tasks from Nikola instead of dodo.py file"""
143 def __init__(self, nikola, quiet=False):
144 self.nikola = nikola
145 self.quiet = quiet
146
147 def load_tasks(self, cmd, opt_values, pos_args):
148 if self.quiet:
149 DOIT_CONFIG = {
150 'verbosity': 0,
151 'reporter': 'zero',
152 }
153 else:
154 DOIT_CONFIG = {
155 'reporter': ExecutedOnlyReporter,
156 }
157 DOIT_CONFIG['default_tasks'] = ['render_site', 'post_render']
158 tasks = generate_tasks(
159 'render_site',
160 self.nikola.gen_tasks('render_site', "Task", 'Group of tasks to render the site.'))
161 latetasks = generate_tasks(
162 'post_render',
163 self.nikola.gen_tasks('post_render', "LateTask", 'Group of tasks to be executes after site is rendered.'))
164 return tasks + latetasks, DOIT_CONFIG
165
166
167 class DoitNikola(DoitMain):
168 # overwite help command
169 DOIT_CMDS = list(DoitMain.DOIT_CMDS) + [Help, Build, Clean, DoitAuto]
170 TASK_LOADER = NikolaTaskLoader
171
172 def __init__(self, nikola, quiet=False):
173 self.nikola = nikola
174 self.task_loader = self.TASK_LOADER(nikola, quiet)
175
176 def get_commands(self):
177 # core doit commands
178 cmds = DoitMain.get_commands(self)
179 # load nikola commands
180 for name, cmd in self.nikola.commands.items():
181 cmds[name] = cmd
182 return cmds
183
184 def run(self, cmd_args):
185 sub_cmds = self.get_commands()
186 args = self.process_args(cmd_args)
187 args = [sys_decode(arg) for arg in args]
188
189 if len(args) == 0 or any(arg in ["--help", '-h'] for arg in args):
190 cmd_args = ['help']
191 args = ['help']
192 # Hide run because Nikola uses build
193 sub_cmds.pop('run')
194
195 if len(args) == 0 or args[0] not in sub_cmds.keys() or \
196 args[0] == 'build':
197 # Check for conf.py before launching run
198 if not self.nikola.configured:
199 LOGGER.error("This command needs to run inside an "
200 "existing Nikola site.")
201 return False
202 return super(DoitNikola, self).run(cmd_args)
203
204 @staticmethod
205 def print_version():
206 print("Nikola version " + __version__)
207
[end of nikola/main.py]
[start of nikola/plugins/command/version.py]
1 # -*- coding: utf-8 -*-
2
3 # Copyright © 2012-2014 Roberto Alsina and others.
4
5 # Permission is hereby granted, free of charge, to any
6 # person obtaining a copy of this software and associated
7 # documentation files (the "Software"), to deal in the
8 # Software without restriction, including without limitation
9 # the rights to use, copy, modify, merge, publish,
10 # distribute, sublicense, and/or sell copies of the
11 # Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice
15 # shall be included in all copies or substantial portions of
16 # the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 from __future__ import print_function
28
29 from nikola.plugin_categories import Command
30 from nikola import __version__
31
32
33 class CommandVersion(Command):
34 """Print the version."""
35
36 name = "version"
37
38 doc_usage = ""
39 needs_config = False
40 doc_purpose = "print the Nikola version number"
41
42 def _execute(self, options={}, args=None):
43 """Print the version number."""
44 print("Nikola version " + __version__)
45
[end of nikola/plugins/command/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nikola/main.py b/nikola/main.py
--- a/nikola/main.py
+++ b/nikola/main.py
@@ -191,7 +191,9 @@
args = ['help']
# Hide run because Nikola uses build
sub_cmds.pop('run')
-
+ if len(args) == 0 or any(arg in ["--version", '-V'] for arg in args):
+ cmd_args = ['version']
+ args = ['version']
if len(args) == 0 or args[0] not in sub_cmds.keys() or \
args[0] == 'build':
# Check for conf.py before launching run
@@ -203,4 +205,4 @@
@staticmethod
def print_version():
- print("Nikola version " + __version__)
+ print("Nikola v" + __version__)
diff --git a/nikola/plugins/command/version.py b/nikola/plugins/command/version.py
--- a/nikola/plugins/command/version.py
+++ b/nikola/plugins/command/version.py
@@ -41,4 +41,4 @@
def _execute(self, options={}, args=None):
"""Print the version number."""
- print("Nikola version " + __version__)
+ print("Nikola v" + __version__)
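The essential control flow added by the patch, reduced to a few lines (the `args` value is a made-up example of what `nikola -V` would produce):

```python
args = ["-V"]

if len(args) == 0 or any(arg in ["--version", "-V"] for arg in args):
    cmd_args = ["version"]
    args = ["version"]  # dispatch to the `version` command...

# ...so the "This command needs to run inside an existing Nikola site."
# check further down is never reached for a bare --version/-V invocation.
```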
| {"golden_diff": "diff --git a/nikola/main.py b/nikola/main.py\n--- a/nikola/main.py\n+++ b/nikola/main.py\n@@ -191,7 +191,9 @@\n args = ['help']\n # Hide run because Nikola uses build\n sub_cmds.pop('run')\n-\n+ if len(args) == 0 or any(arg in [\"--version\", '-V'] for arg in args):\n+ cmd_args = ['version']\n+ args = ['version']\n if len(args) == 0 or args[0] not in sub_cmds.keys() or \\\n args[0] == 'build':\n # Check for conf.py before launching run\n@@ -203,4 +205,4 @@\n \n @staticmethod\n def print_version():\n- print(\"Nikola version \" + __version__)\n+ print(\"Nikola v\" + __version__)\ndiff --git a/nikola/plugins/command/version.py b/nikola/plugins/command/version.py\n--- a/nikola/plugins/command/version.py\n+++ b/nikola/plugins/command/version.py\n@@ -41,4 +41,4 @@\n \n def _execute(self, options={}, args=None):\n \"\"\"Print the version number.\"\"\"\n- print(\"Nikola version \" + __version__)\n+ print(\"Nikola v\" + __version__)\n", "issue": "`nikola -V/--version` doesn\u2019t work outside of a site\n```\n(nikola-py2)[kwpolska@kw-cassandra ~WORKON_HOME]% nikola --version\n[2014-01-04T17:40:42Z] ERROR: Nikola: This command needs to run inside an existing Nikola site.\n(nikola-py2)[kwpolska@kw-cassandra ~WORKON_HOME]% cd ~/nikola\n(nikola-py2)[kwpolska@kw-cassandra nikola Gmaster]% nikola --version\nNikola version 6.2.1\n```\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2014 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nfrom __future__ import print_function, unicode_literals\nfrom operator import attrgetter\nimport os\nimport shutil\nimport sys\nimport traceback\n\nfrom doit.loader import generate_tasks\nfrom doit.cmd_base import TaskLoader\nfrom doit.reporter import ExecutedOnlyReporter\nfrom doit.doit_cmd import DoitMain\nfrom doit.cmd_help import Help as DoitHelp\nfrom doit.cmd_run import Run as DoitRun\nfrom doit.cmd_clean import Clean as DoitClean\nfrom doit.cmd_auto import Auto as DoitAuto\nfrom logbook import NullHandler\n\nfrom . 
import __version__\nfrom .nikola import Nikola\nfrom .utils import _reload, sys_decode, get_root_dir, LOGGER, STRICT_HANDLER\n\n\nconfig = {}\n\n\ndef main(args):\n quiet = False\n if len(args) > 0 and args[0] == 'build' and '--strict' in args:\n LOGGER.notice('Running in strict mode')\n STRICT_HANDLER.push_application()\n if len(args) > 0 and args[0] == 'build' and '-q' in args or '--quiet' in args:\n nullhandler = NullHandler()\n nullhandler.push_application()\n quiet = True\n global config\n\n root = get_root_dir()\n if root:\n os.chdir(root)\n\n sys.path.append('')\n try:\n import conf\n _reload(conf)\n config = conf.__dict__\n except Exception:\n if os.path.exists('conf.py'):\n msg = traceback.format_exc(0).splitlines()[1]\n LOGGER.error('In conf.py line {0}: {1}'.format(sys.exc_info()[2].tb_lineno, msg))\n sys.exit(1)\n config = {}\n\n site = Nikola(**config)\n return DoitNikola(site, quiet).run(args)\n\n\nclass Help(DoitHelp):\n \"\"\"show Nikola usage instead of doit \"\"\"\n\n @staticmethod\n def print_usage(cmds):\n \"\"\"print nikola \"usage\" (basic help) instructions\"\"\"\n print(\"Nikola is a tool to create static websites and blogs. For full documentation and more information, please visit http://getnikola.com\\n\\n\")\n print(\"Available commands:\")\n for cmd in sorted(cmds.values(), key=attrgetter('name')):\n print(\" nikola %-*s %s\" % (20, cmd.name, cmd.doc_purpose))\n print(\"\")\n print(\" nikola help show help / reference\")\n print(\" nikola help <command> show command usage\")\n print(\" nikola help <task-name> show task usage\")\n\n\nclass Build(DoitRun):\n \"\"\"expose \"run\" command as \"build\" for backward compatibility\"\"\"\n def __init__(self, *args, **kw):\n opts = list(self.cmd_options)\n opts.append(\n {\n 'name': 'strict',\n 'long': 'strict',\n 'default': False,\n 'type': bool,\n 'help': \"Fail on things that would normally be warnings.\",\n }\n )\n opts.append(\n {\n 'name': 'quiet',\n 'long': 'quiet',\n 'short': 'q',\n 'default': False,\n 'type': bool,\n 'help': \"Run quietly.\",\n }\n )\n self.cmd_options = tuple(opts)\n super(Build, self).__init__(*args, **kw)\n\n\nclass Clean(DoitClean):\n \"\"\"A clean that removes cache/\"\"\"\n\n def clean_tasks(self, tasks, dryrun):\n if not dryrun and config:\n cache_folder = config.get('CACHE_FOLDER', 'cache')\n if os.path.exists(cache_folder):\n shutil.rmtree(cache_folder)\n return super(Clean, self).clean_tasks(tasks, dryrun)\n\n# Nikola has its own \"auto\" commands that uses livereload.\n# Expose original doit \"auto\" command as \"doit_auto\".\nDoitAuto.name = 'doit_auto'\n\n\nclass NikolaTaskLoader(TaskLoader):\n \"\"\"custom task loader to get tasks from Nikola instead of dodo.py file\"\"\"\n def __init__(self, nikola, quiet=False):\n self.nikola = nikola\n self.quiet = quiet\n\n def load_tasks(self, cmd, opt_values, pos_args):\n if self.quiet:\n DOIT_CONFIG = {\n 'verbosity': 0,\n 'reporter': 'zero',\n }\n else:\n DOIT_CONFIG = {\n 'reporter': ExecutedOnlyReporter,\n }\n DOIT_CONFIG['default_tasks'] = ['render_site', 'post_render']\n tasks = generate_tasks(\n 'render_site',\n self.nikola.gen_tasks('render_site', \"Task\", 'Group of tasks to render the site.'))\n latetasks = generate_tasks(\n 'post_render',\n self.nikola.gen_tasks('post_render', \"LateTask\", 'Group of tasks to be executes after site is rendered.'))\n return tasks + latetasks, DOIT_CONFIG\n\n\nclass DoitNikola(DoitMain):\n # overwite help command\n DOIT_CMDS = list(DoitMain.DOIT_CMDS) + [Help, Build, Clean, DoitAuto]\n TASK_LOADER = 
NikolaTaskLoader\n\n def __init__(self, nikola, quiet=False):\n self.nikola = nikola\n self.task_loader = self.TASK_LOADER(nikola, quiet)\n\n def get_commands(self):\n # core doit commands\n cmds = DoitMain.get_commands(self)\n # load nikola commands\n for name, cmd in self.nikola.commands.items():\n cmds[name] = cmd\n return cmds\n\n def run(self, cmd_args):\n sub_cmds = self.get_commands()\n args = self.process_args(cmd_args)\n args = [sys_decode(arg) for arg in args]\n\n if len(args) == 0 or any(arg in [\"--help\", '-h'] for arg in args):\n cmd_args = ['help']\n args = ['help']\n # Hide run because Nikola uses build\n sub_cmds.pop('run')\n\n if len(args) == 0 or args[0] not in sub_cmds.keys() or \\\n args[0] == 'build':\n # Check for conf.py before launching run\n if not self.nikola.configured:\n LOGGER.error(\"This command needs to run inside an \"\n \"existing Nikola site.\")\n return False\n return super(DoitNikola, self).run(cmd_args)\n\n @staticmethod\n def print_version():\n print(\"Nikola version \" + __version__)\n", "path": "nikola/main.py"}, {"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2014 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nfrom __future__ import print_function\n\nfrom nikola.plugin_categories import Command\nfrom nikola import __version__\n\n\nclass CommandVersion(Command):\n \"\"\"Print the version.\"\"\"\n\n name = \"version\"\n\n doc_usage = \"\"\n needs_config = False\n doc_purpose = \"print the Nikola version number\"\n\n def _execute(self, options={}, args=None):\n \"\"\"Print the version number.\"\"\"\n print(\"Nikola version \" + __version__)\n", "path": "nikola/plugins/command/version.py"}]} | 3,327 | 296 |
gh_patches_debug_2694 | rasdani/github-patches | git_diff | googleapis__google-cloud-python-1347 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Should we compare Entity._meanings in __eq__
/cc @tseaver @pcostell
</issue>
<code>
[start of gcloud/datastore/entity.py]
1 # Copyright 2014 Google Inc. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Class for representing a single entity in the Cloud Datastore."""
16
17
18 from gcloud._helpers import _ensure_tuple_or_list
19
20
21 class Entity(dict):
22 """Entities are akin to rows in a relational database
23
24 An entity storing the actual instance of data.
25
26 Each entity is officially represented with a
27 :class:`gcloud.datastore.key.Key` class, however it is possible that
28 you might create an Entity with only a partial Key (that is, a Key
29 with a Kind, and possibly a parent, but without an ID). In such a
30 case, the datastore service will automatically assign an ID to the
31 partial key.
32
33 Entities in this API act like dictionaries with extras built in that
34 allow you to delete or persist the data stored on the entity.
35
36 Entities are mutable and act like a subclass of a dictionary.
37 This means you could take an existing entity and change the key
38 to duplicate the object.
39
40 Use :func:`gcloud.datastore.get` to retrieve an existing entity.
41
42 >>> datastore.get(key)
43 <Entity[{'kind': 'EntityKind', id: 1234}] {'property': 'value'}>
44
45 You can the set values on the entity just like you would on any
46 other dictionary.
47
48 >>> entity['age'] = 20
49 >>> entity['name'] = 'JJ'
50 >>> entity
51 <Entity[{'kind': 'EntityKind', id: 1234}] {'age': 20, 'name': 'JJ'}>
52
53 And you can convert an entity to a regular Python dictionary with the
54 ``dict`` builtin:
55
56 >>> dict(entity)
57 {'age': 20, 'name': 'JJ'}
58
59 .. note::
60
61 When saving an entity to the backend, values which are "text"
62 (``unicode`` in Python2, ``str`` in Python3) will be saved using
63 the 'text_value' field, after being encoded to UTF-8. When
64 retrieved from the back-end, such values will be decoded to "text"
65 again. Values which are "bytes" (``str`` in Python2, ``bytes`` in
66 Python3), will be saved using the 'blob_value' field, without
67 any decoding / encoding step.
68
69 :type key: :class:`gcloud.datastore.key.Key`
70 :param key: Optional key to be set on entity. Required for
71 :func:`gcloud.datastore.put()` and
72 :func:`gcloud.datastore.put_multi()`
73
74 :type exclude_from_indexes: tuple of string
75 :param exclude_from_indexes: Names of fields whose values are not to be
76 indexed for this entity.
77 """
78
79 def __init__(self, key=None, exclude_from_indexes=()):
80 super(Entity, self).__init__()
81 self.key = key
82 self._exclude_from_indexes = set(_ensure_tuple_or_list(
83 'exclude_from_indexes', exclude_from_indexes))
84 # NOTE: This will be populated when parsing a protobuf in
85 # gcloud.datastore.helpers.entity_from_protobuf.
86 self._meanings = {}
87
88 def __eq__(self, other):
89 """Compare two entities for equality.
90
91 Entities compare equal if their keys compare equal, and their
92 properties compare equal.
93
94 :rtype: boolean
95 :returns: True if the entities compare equal, else False.
96 """
97 if not isinstance(other, Entity):
98 return False
99
100 return (self.key == other.key and
101 super(Entity, self).__eq__(other))
102
103 def __ne__(self, other):
104 """Compare two entities for inequality.
105
106 Entities compare equal if their keys compare equal, and their
107 properties compare equal.
108
109 :rtype: boolean
110 :returns: False if the entities compare equal, else True.
111 """
112 return not self.__eq__(other)
113
114 @property
115 def kind(self):
116 """Get the kind of the current entity.
117
118 .. note::
119 This relies entirely on the :class:`gcloud.datastore.key.Key`
120 set on the entity. That means that we're not storing the kind
121 of the entity at all, just the properties and a pointer to a
122 Key which knows its Kind.
123 """
124 if self.key:
125 return self.key.kind
126
127 @property
128 def exclude_from_indexes(self):
129 """Names of fields which are *not* to be indexed for this entity.
130
131 :rtype: sequence of field names
132 """
133 return frozenset(self._exclude_from_indexes)
134
135 def __repr__(self):
136 if self.key:
137 return '<Entity%s %s>' % (self.key.path,
138 super(Entity, self).__repr__())
139 else:
140 return '<Entity %s>' % (super(Entity, self).__repr__())
141
[end of gcloud/datastore/entity.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gcloud/datastore/entity.py b/gcloud/datastore/entity.py
--- a/gcloud/datastore/entity.py
+++ b/gcloud/datastore/entity.py
@@ -98,6 +98,8 @@
return False
return (self.key == other.key and
+ self._exclude_from_indexes == other._exclude_from_indexes and
+ self._meanings == other._meanings and
super(Entity, self).__eq__(other))
def __ne__(self, other):
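An illustrative consequence of the stricter comparison (the `_meanings` entry is an arbitrary example of the kind of value `entity_from_protobuf` might record):

```python
from gcloud.datastore.entity import Entity

e1, e2 = Entity(), Entity()
e1["prop"] = e2["prop"] = b"bytes"
e1._meanings["prop"] = (18, b"bytes")  # only e1 carries a meaning

print(e1 == e2)  # True before this patch, False with it applied
```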
| {"golden_diff": "diff --git a/gcloud/datastore/entity.py b/gcloud/datastore/entity.py\n--- a/gcloud/datastore/entity.py\n+++ b/gcloud/datastore/entity.py\n@@ -98,6 +98,8 @@\n return False\n \n return (self.key == other.key and\n+ self._exclude_from_indexes == other._exclude_from_indexes and\n+ self._meanings == other._meanings and\n super(Entity, self).__eq__(other))\n \n def __ne__(self, other):\n", "issue": "Should we compare Entity._meanings in __eq__\n/cc @tseaver @pcostell \n\n", "before_files": [{"content": "# Copyright 2014 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Class for representing a single entity in the Cloud Datastore.\"\"\"\n\n\nfrom gcloud._helpers import _ensure_tuple_or_list\n\n\nclass Entity(dict):\n \"\"\"Entities are akin to rows in a relational database\n\n An entity storing the actual instance of data.\n\n Each entity is officially represented with a\n :class:`gcloud.datastore.key.Key` class, however it is possible that\n you might create an Entity with only a partial Key (that is, a Key\n with a Kind, and possibly a parent, but without an ID). In such a\n case, the datastore service will automatically assign an ID to the\n partial key.\n\n Entities in this API act like dictionaries with extras built in that\n allow you to delete or persist the data stored on the entity.\n\n Entities are mutable and act like a subclass of a dictionary.\n This means you could take an existing entity and change the key\n to duplicate the object.\n\n Use :func:`gcloud.datastore.get` to retrieve an existing entity.\n\n >>> datastore.get(key)\n <Entity[{'kind': 'EntityKind', id: 1234}] {'property': 'value'}>\n\n You can the set values on the entity just like you would on any\n other dictionary.\n\n >>> entity['age'] = 20\n >>> entity['name'] = 'JJ'\n >>> entity\n <Entity[{'kind': 'EntityKind', id: 1234}] {'age': 20, 'name': 'JJ'}>\n\n And you can convert an entity to a regular Python dictionary with the\n ``dict`` builtin:\n\n >>> dict(entity)\n {'age': 20, 'name': 'JJ'}\n\n .. note::\n\n When saving an entity to the backend, values which are \"text\"\n (``unicode`` in Python2, ``str`` in Python3) will be saved using\n the 'text_value' field, after being encoded to UTF-8. When\n retrieved from the back-end, such values will be decoded to \"text\"\n again. Values which are \"bytes\" (``str`` in Python2, ``bytes`` in\n Python3), will be saved using the 'blob_value' field, without\n any decoding / encoding step.\n\n :type key: :class:`gcloud.datastore.key.Key`\n :param key: Optional key to be set on entity. 
Required for\n :func:`gcloud.datastore.put()` and\n :func:`gcloud.datastore.put_multi()`\n\n :type exclude_from_indexes: tuple of string\n :param exclude_from_indexes: Names of fields whose values are not to be\n indexed for this entity.\n \"\"\"\n\n def __init__(self, key=None, exclude_from_indexes=()):\n super(Entity, self).__init__()\n self.key = key\n self._exclude_from_indexes = set(_ensure_tuple_or_list(\n 'exclude_from_indexes', exclude_from_indexes))\n # NOTE: This will be populated when parsing a protobuf in\n # gcloud.datastore.helpers.entity_from_protobuf.\n self._meanings = {}\n\n def __eq__(self, other):\n \"\"\"Compare two entities for equality.\n\n Entities compare equal if their keys compare equal, and their\n properties compare equal.\n\n :rtype: boolean\n :returns: True if the entities compare equal, else False.\n \"\"\"\n if not isinstance(other, Entity):\n return False\n\n return (self.key == other.key and\n super(Entity, self).__eq__(other))\n\n def __ne__(self, other):\n \"\"\"Compare two entities for inequality.\n\n Entities compare equal if their keys compare equal, and their\n properties compare equal.\n\n :rtype: boolean\n :returns: False if the entities compare equal, else True.\n \"\"\"\n return not self.__eq__(other)\n\n @property\n def kind(self):\n \"\"\"Get the kind of the current entity.\n\n .. note::\n This relies entirely on the :class:`gcloud.datastore.key.Key`\n set on the entity. That means that we're not storing the kind\n of the entity at all, just the properties and a pointer to a\n Key which knows its Kind.\n \"\"\"\n if self.key:\n return self.key.kind\n\n @property\n def exclude_from_indexes(self):\n \"\"\"Names of fields which are *not* to be indexed for this entity.\n\n :rtype: sequence of field names\n \"\"\"\n return frozenset(self._exclude_from_indexes)\n\n def __repr__(self):\n if self.key:\n return '<Entity%s %s>' % (self.key.path,\n super(Entity, self).__repr__())\n else:\n return '<Entity %s>' % (super(Entity, self).__repr__())\n", "path": "gcloud/datastore/entity.py"}]} | 2,052 | 110 |
gh_patches_debug_7392 | rasdani/github-patches | git_diff | pyca__cryptography-480 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use typedef ... *FOO_PTR; where appropriate.
So that FFI knows that the underlying type is opaque but that this is a pointer.
This makes `ffi.typeof` work correctly on bindings where functions take opaque typedef pointers such as some of the `ENGINE_` stuff.
See here for an example of it going wrong https://gist.github.com/public/8456596
</issue>
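For context, the distinction the issue asks for, shown with cffi directly rather than the project's binding loader (declaration names are reused from the file below; the exact final declarations are an assumption):

```python
import cffi

ffi = cffi.FFI()

# Opaque type: cffi knows nothing about it, so it can only be used behind an
# explicit `*`, as in `ENGINE *`.
ffi.cdef("typedef ... ENGINE;")

# Opaque *pointer* typedef: cffi additionally knows the type itself is a
# pointer, so ffi.typeof("ENGINE_CTRL_FUNC_PTR") and function signatures that
# take it by value behave as expected.
ffi.cdef("typedef ... *ENGINE_CTRL_FUNC_PTR;")
```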
<code>
[start of cryptography/hazmat/bindings/openssl/engine.py]
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
10 # implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 INCLUDES = """
15 #include <openssl/engine.h>
16 """
17
18 TYPES = """
19 typedef ... ENGINE;
20 typedef ... RSA_METHOD;
21 typedef ... DSA_METHOD;
22 typedef ... ECDH_METHOD;
23 typedef ... ECDSA_METHOD;
24 typedef ... DH_METHOD;
25 typedef ... RAND_METHOD;
26 typedef ... STORE_METHOD;
27 typedef ... ENGINE_GEN_INT_FUNC_PTR;
28 typedef ... ENGINE_CTRL_FUNC_PTR;
29 typedef ... ENGINE_LOAD_KEY_PTR;
30 typedef ... ENGINE_CIPHERS_PTR;
31 typedef ... ENGINE_DIGESTS_PTR;
32 typedef ... ENGINE_CMD_DEFN;
33 typedef ... UI_METHOD;
34
35 static const unsigned int ENGINE_METHOD_RSA;
36 static const unsigned int ENGINE_METHOD_DSA;
37 static const unsigned int ENGINE_METHOD_RAND;
38 static const unsigned int ENGINE_METHOD_ECDH;
39 static const unsigned int ENGINE_METHOD_ECDSA;
40 static const unsigned int ENGINE_METHOD_CIPHERS;
41 static const unsigned int ENGINE_METHOD_DIGESTS;
42 static const unsigned int ENGINE_METHOD_STORE;
43 static const unsigned int ENGINE_METHOD_ALL;
44 static const unsigned int ENGINE_METHOD_NONE;
45 """
46
47 FUNCTIONS = """
48 ENGINE *ENGINE_get_first(void);
49 ENGINE *ENGINE_get_last(void);
50 ENGINE *ENGINE_get_next(ENGINE *);
51 ENGINE *ENGINE_get_prev(ENGINE *);
52 int ENGINE_add(ENGINE *);
53 int ENGINE_remove(ENGINE *);
54 ENGINE *ENGINE_by_id(const char *);
55 int ENGINE_init(ENGINE *);
56 int ENGINE_finish(ENGINE *);
57 void ENGINE_load_openssl(void);
58 void ENGINE_load_dynamic(void);
59 void ENGINE_load_cryptodev(void);
60 void ENGINE_load_builtin_engines(void);
61 void ENGINE_cleanup(void);
62 ENGINE *ENGINE_get_default_RSA(void);
63 ENGINE *ENGINE_get_default_DSA(void);
64 ENGINE *ENGINE_get_default_ECDH(void);
65 ENGINE *ENGINE_get_default_ECDSA(void);
66 ENGINE *ENGINE_get_default_DH(void);
67 ENGINE *ENGINE_get_default_RAND(void);
68 ENGINE *ENGINE_get_cipher_engine(int);
69 ENGINE *ENGINE_get_digest_engine(int);
70 int ENGINE_set_default_RSA(ENGINE *);
71 int ENGINE_set_default_DSA(ENGINE *);
72 int ENGINE_set_default_ECDH(ENGINE *);
73 int ENGINE_set_default_ECDSA(ENGINE *);
74 int ENGINE_set_default_DH(ENGINE *);
75 int ENGINE_set_default_RAND(ENGINE *);
76 int ENGINE_set_default_ciphers(ENGINE *);
77 int ENGINE_set_default_digests(ENGINE *);
78 int ENGINE_set_default_string(ENGINE *, const char *);
79 int ENGINE_set_default(ENGINE *, unsigned int);
80 unsigned int ENGINE_get_table_flags(void);
81 void ENGINE_set_table_flags(unsigned int);
82 int ENGINE_register_RSA(ENGINE *);
83 void ENGINE_unregister_RSA(ENGINE *);
84 void ENGINE_register_all_RSA(void);
85 int ENGINE_register_DSA(ENGINE *);
86 void ENGINE_unregister_DSA(ENGINE *);
87 void ENGINE_register_all_DSA(void);
88 int ENGINE_register_ECDH(ENGINE *);
89 void ENGINE_unregister_ECDH(ENGINE *);
90 void ENGINE_register_all_ECDH(void);
91 int ENGINE_register_ECDSA(ENGINE *);
92 void ENGINE_unregister_ECDSA(ENGINE *);
93 void ENGINE_register_all_ECDSA(void);
94 int ENGINE_register_DH(ENGINE *);
95 void ENGINE_unregister_DH(ENGINE *);
96 void ENGINE_register_all_DH(void);
97 int ENGINE_register_RAND(ENGINE *);
98 void ENGINE_unregister_RAND(ENGINE *);
99 void ENGINE_register_all_RAND(void);
100 int ENGINE_register_STORE(ENGINE *);
101 void ENGINE_unregister_STORE(ENGINE *);
102 void ENGINE_register_all_STORE(void);
103 int ENGINE_register_ciphers(ENGINE *);
104 void ENGINE_unregister_ciphers(ENGINE *);
105 void ENGINE_register_all_ciphers(void);
106 int ENGINE_register_digests(ENGINE *);
107 void ENGINE_unregister_digests(ENGINE *);
108 void ENGINE_register_all_digests(void);
109 int ENGINE_register_complete(ENGINE *);
110 int ENGINE_register_all_complete(void);
111 int ENGINE_ctrl(ENGINE *, int, long, void *, void (*)(void));
112 int ENGINE_cmd_is_executable(ENGINE *, int);
113 int ENGINE_ctrl_cmd(ENGINE *, const char *, long, void *, void (*)(void), int);
114 int ENGINE_ctrl_cmd_string(ENGINE *, const char *, const char *, int);
115
116 ENGINE *ENGINE_new(void);
117 int ENGINE_free(ENGINE *);
118 int ENGINE_up_ref(ENGINE *);
119 int ENGINE_set_id(ENGINE *, const char *);
120 int ENGINE_set_name(ENGINE *, const char *);
121 int ENGINE_set_RSA(ENGINE *, const RSA_METHOD *);
122 int ENGINE_set_DSA(ENGINE *, const DSA_METHOD *);
123 int ENGINE_set_ECDH(ENGINE *, const ECDH_METHOD *);
124 int ENGINE_set_ECDSA(ENGINE *, const ECDSA_METHOD *);
125 int ENGINE_set_DH(ENGINE *, const DH_METHOD *);
126 int ENGINE_set_RAND(ENGINE *, const RAND_METHOD *);
127 int ENGINE_set_STORE(ENGINE *, const STORE_METHOD *);
128 int ENGINE_set_destroy_function(ENGINE *, ENGINE_GEN_INT_FUNC_PTR);
129 int ENGINE_set_init_function(ENGINE *, ENGINE_GEN_INT_FUNC_PTR);
130 int ENGINE_set_finish_function(ENGINE *, ENGINE_GEN_INT_FUNC_PTR);
131 int ENGINE_set_ctrl_function(ENGINE *, ENGINE_CTRL_FUNC_PTR);
132 int ENGINE_set_load_privkey_function(ENGINE *, ENGINE_LOAD_KEY_PTR);
133 int ENGINE_set_load_pubkey_function(ENGINE *, ENGINE_LOAD_KEY_PTR);
134 int ENGINE_set_ciphers(ENGINE *, ENGINE_CIPHERS_PTR);
135 int ENGINE_set_digests(ENGINE *, ENGINE_DIGESTS_PTR);
136 int ENGINE_set_flags(ENGINE *, int);
137 int ENGINE_set_cmd_defns(ENGINE *, const ENGINE_CMD_DEFN *);
138 const char *ENGINE_get_id(const ENGINE *);
139 const char *ENGINE_get_name(const ENGINE *);
140 const RSA_METHOD *ENGINE_get_RSA(const ENGINE *);
141 const DSA_METHOD *ENGINE_get_DSA(const ENGINE *);
142 const ECDH_METHOD *ENGINE_get_ECDH(const ENGINE *);
143 const ECDSA_METHOD *ENGINE_get_ECDSA(const ENGINE *);
144 const DH_METHOD *ENGINE_get_DH(const ENGINE *);
145 const RAND_METHOD *ENGINE_get_RAND(const ENGINE *);
146 const STORE_METHOD *ENGINE_get_STORE(const ENGINE *);
147
148 const EVP_CIPHER *ENGINE_get_cipher(ENGINE *, int);
149 const EVP_MD *ENGINE_get_digest(ENGINE *, int);
150 int ENGINE_get_flags(const ENGINE *);
151 const ENGINE_CMD_DEFN *ENGINE_get_cmd_defns(const ENGINE *);
152 EVP_PKEY *ENGINE_load_private_key(ENGINE *, const char *, UI_METHOD *, void *);
153 EVP_PKEY *ENGINE_load_public_key(ENGINE *, const char *, UI_METHOD *, void *);
154 void ENGINE_add_conf_module(void);
155 """
156
157 MACROS = """
158 """
159
160 CUSTOMIZATIONS = """
161 """
162
163 CONDITIONAL_NAMES = {}
164
[end of cryptography/hazmat/bindings/openssl/engine.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cryptography/hazmat/bindings/openssl/engine.py b/cryptography/hazmat/bindings/openssl/engine.py
--- a/cryptography/hazmat/bindings/openssl/engine.py
+++ b/cryptography/hazmat/bindings/openssl/engine.py
@@ -24,11 +24,11 @@
typedef ... DH_METHOD;
typedef ... RAND_METHOD;
typedef ... STORE_METHOD;
-typedef ... ENGINE_GEN_INT_FUNC_PTR;
-typedef ... ENGINE_CTRL_FUNC_PTR;
-typedef ... ENGINE_LOAD_KEY_PTR;
-typedef ... ENGINE_CIPHERS_PTR;
-typedef ... ENGINE_DIGESTS_PTR;
+typedef ... *ENGINE_GEN_INT_FUNC_PTR;
+typedef ... *ENGINE_CTRL_FUNC_PTR;
+typedef ... *ENGINE_LOAD_KEY_PTR;
+typedef ... *ENGINE_CIPHERS_PTR;
+typedef ... *ENGINE_DIGESTS_PTR;
typedef ... ENGINE_CMD_DEFN;
typedef ... UI_METHOD;
| {"golden_diff": "diff --git a/cryptography/hazmat/bindings/openssl/engine.py b/cryptography/hazmat/bindings/openssl/engine.py\n--- a/cryptography/hazmat/bindings/openssl/engine.py\n+++ b/cryptography/hazmat/bindings/openssl/engine.py\n@@ -24,11 +24,11 @@\n typedef ... DH_METHOD;\n typedef ... RAND_METHOD;\n typedef ... STORE_METHOD;\n-typedef ... ENGINE_GEN_INT_FUNC_PTR;\n-typedef ... ENGINE_CTRL_FUNC_PTR;\n-typedef ... ENGINE_LOAD_KEY_PTR;\n-typedef ... ENGINE_CIPHERS_PTR;\n-typedef ... ENGINE_DIGESTS_PTR;\n+typedef ... *ENGINE_GEN_INT_FUNC_PTR;\n+typedef ... *ENGINE_CTRL_FUNC_PTR;\n+typedef ... *ENGINE_LOAD_KEY_PTR;\n+typedef ... *ENGINE_CIPHERS_PTR;\n+typedef ... *ENGINE_DIGESTS_PTR;\n typedef ... ENGINE_CMD_DEFN;\n typedef ... UI_METHOD;\n", "issue": "Use typedef ... *FOO_PTR; where appropriate.\nSo that FFI knows that the underlying type is opaque but that this is a pointer.\n\nThis makes `ffi.typeof` work correctly on bindings where functions take opaque typedef pointers such as some of the `ENGINE_` stuff.\n\nSee here for an example of it going wrong https://gist.github.com/public/8456596\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nINCLUDES = \"\"\"\n#include <openssl/engine.h>\n\"\"\"\n\nTYPES = \"\"\"\ntypedef ... ENGINE;\ntypedef ... RSA_METHOD;\ntypedef ... DSA_METHOD;\ntypedef ... ECDH_METHOD;\ntypedef ... ECDSA_METHOD;\ntypedef ... DH_METHOD;\ntypedef ... RAND_METHOD;\ntypedef ... STORE_METHOD;\ntypedef ... ENGINE_GEN_INT_FUNC_PTR;\ntypedef ... ENGINE_CTRL_FUNC_PTR;\ntypedef ... ENGINE_LOAD_KEY_PTR;\ntypedef ... ENGINE_CIPHERS_PTR;\ntypedef ... ENGINE_DIGESTS_PTR;\ntypedef ... ENGINE_CMD_DEFN;\ntypedef ... 
UI_METHOD;\n\nstatic const unsigned int ENGINE_METHOD_RSA;\nstatic const unsigned int ENGINE_METHOD_DSA;\nstatic const unsigned int ENGINE_METHOD_RAND;\nstatic const unsigned int ENGINE_METHOD_ECDH;\nstatic const unsigned int ENGINE_METHOD_ECDSA;\nstatic const unsigned int ENGINE_METHOD_CIPHERS;\nstatic const unsigned int ENGINE_METHOD_DIGESTS;\nstatic const unsigned int ENGINE_METHOD_STORE;\nstatic const unsigned int ENGINE_METHOD_ALL;\nstatic const unsigned int ENGINE_METHOD_NONE;\n\"\"\"\n\nFUNCTIONS = \"\"\"\nENGINE *ENGINE_get_first(void);\nENGINE *ENGINE_get_last(void);\nENGINE *ENGINE_get_next(ENGINE *);\nENGINE *ENGINE_get_prev(ENGINE *);\nint ENGINE_add(ENGINE *);\nint ENGINE_remove(ENGINE *);\nENGINE *ENGINE_by_id(const char *);\nint ENGINE_init(ENGINE *);\nint ENGINE_finish(ENGINE *);\nvoid ENGINE_load_openssl(void);\nvoid ENGINE_load_dynamic(void);\nvoid ENGINE_load_cryptodev(void);\nvoid ENGINE_load_builtin_engines(void);\nvoid ENGINE_cleanup(void);\nENGINE *ENGINE_get_default_RSA(void);\nENGINE *ENGINE_get_default_DSA(void);\nENGINE *ENGINE_get_default_ECDH(void);\nENGINE *ENGINE_get_default_ECDSA(void);\nENGINE *ENGINE_get_default_DH(void);\nENGINE *ENGINE_get_default_RAND(void);\nENGINE *ENGINE_get_cipher_engine(int);\nENGINE *ENGINE_get_digest_engine(int);\nint ENGINE_set_default_RSA(ENGINE *);\nint ENGINE_set_default_DSA(ENGINE *);\nint ENGINE_set_default_ECDH(ENGINE *);\nint ENGINE_set_default_ECDSA(ENGINE *);\nint ENGINE_set_default_DH(ENGINE *);\nint ENGINE_set_default_RAND(ENGINE *);\nint ENGINE_set_default_ciphers(ENGINE *);\nint ENGINE_set_default_digests(ENGINE *);\nint ENGINE_set_default_string(ENGINE *, const char *);\nint ENGINE_set_default(ENGINE *, unsigned int);\nunsigned int ENGINE_get_table_flags(void);\nvoid ENGINE_set_table_flags(unsigned int);\nint ENGINE_register_RSA(ENGINE *);\nvoid ENGINE_unregister_RSA(ENGINE *);\nvoid ENGINE_register_all_RSA(void);\nint ENGINE_register_DSA(ENGINE *);\nvoid ENGINE_unregister_DSA(ENGINE *);\nvoid ENGINE_register_all_DSA(void);\nint ENGINE_register_ECDH(ENGINE *);\nvoid ENGINE_unregister_ECDH(ENGINE *);\nvoid ENGINE_register_all_ECDH(void);\nint ENGINE_register_ECDSA(ENGINE *);\nvoid ENGINE_unregister_ECDSA(ENGINE *);\nvoid ENGINE_register_all_ECDSA(void);\nint ENGINE_register_DH(ENGINE *);\nvoid ENGINE_unregister_DH(ENGINE *);\nvoid ENGINE_register_all_DH(void);\nint ENGINE_register_RAND(ENGINE *);\nvoid ENGINE_unregister_RAND(ENGINE *);\nvoid ENGINE_register_all_RAND(void);\nint ENGINE_register_STORE(ENGINE *);\nvoid ENGINE_unregister_STORE(ENGINE *);\nvoid ENGINE_register_all_STORE(void);\nint ENGINE_register_ciphers(ENGINE *);\nvoid ENGINE_unregister_ciphers(ENGINE *);\nvoid ENGINE_register_all_ciphers(void);\nint ENGINE_register_digests(ENGINE *);\nvoid ENGINE_unregister_digests(ENGINE *);\nvoid ENGINE_register_all_digests(void);\nint ENGINE_register_complete(ENGINE *);\nint ENGINE_register_all_complete(void);\nint ENGINE_ctrl(ENGINE *, int, long, void *, void (*)(void));\nint ENGINE_cmd_is_executable(ENGINE *, int);\nint ENGINE_ctrl_cmd(ENGINE *, const char *, long, void *, void (*)(void), int);\nint ENGINE_ctrl_cmd_string(ENGINE *, const char *, const char *, int);\n\nENGINE *ENGINE_new(void);\nint ENGINE_free(ENGINE *);\nint ENGINE_up_ref(ENGINE *);\nint ENGINE_set_id(ENGINE *, const char *);\nint ENGINE_set_name(ENGINE *, const char *);\nint ENGINE_set_RSA(ENGINE *, const RSA_METHOD *);\nint ENGINE_set_DSA(ENGINE *, const DSA_METHOD *);\nint ENGINE_set_ECDH(ENGINE *, const ECDH_METHOD *);\nint 
ENGINE_set_ECDSA(ENGINE *, const ECDSA_METHOD *);\nint ENGINE_set_DH(ENGINE *, const DH_METHOD *);\nint ENGINE_set_RAND(ENGINE *, const RAND_METHOD *);\nint ENGINE_set_STORE(ENGINE *, const STORE_METHOD *);\nint ENGINE_set_destroy_function(ENGINE *, ENGINE_GEN_INT_FUNC_PTR);\nint ENGINE_set_init_function(ENGINE *, ENGINE_GEN_INT_FUNC_PTR);\nint ENGINE_set_finish_function(ENGINE *, ENGINE_GEN_INT_FUNC_PTR);\nint ENGINE_set_ctrl_function(ENGINE *, ENGINE_CTRL_FUNC_PTR);\nint ENGINE_set_load_privkey_function(ENGINE *, ENGINE_LOAD_KEY_PTR);\nint ENGINE_set_load_pubkey_function(ENGINE *, ENGINE_LOAD_KEY_PTR);\nint ENGINE_set_ciphers(ENGINE *, ENGINE_CIPHERS_PTR);\nint ENGINE_set_digests(ENGINE *, ENGINE_DIGESTS_PTR);\nint ENGINE_set_flags(ENGINE *, int);\nint ENGINE_set_cmd_defns(ENGINE *, const ENGINE_CMD_DEFN *);\nconst char *ENGINE_get_id(const ENGINE *);\nconst char *ENGINE_get_name(const ENGINE *);\nconst RSA_METHOD *ENGINE_get_RSA(const ENGINE *);\nconst DSA_METHOD *ENGINE_get_DSA(const ENGINE *);\nconst ECDH_METHOD *ENGINE_get_ECDH(const ENGINE *);\nconst ECDSA_METHOD *ENGINE_get_ECDSA(const ENGINE *);\nconst DH_METHOD *ENGINE_get_DH(const ENGINE *);\nconst RAND_METHOD *ENGINE_get_RAND(const ENGINE *);\nconst STORE_METHOD *ENGINE_get_STORE(const ENGINE *);\n\nconst EVP_CIPHER *ENGINE_get_cipher(ENGINE *, int);\nconst EVP_MD *ENGINE_get_digest(ENGINE *, int);\nint ENGINE_get_flags(const ENGINE *);\nconst ENGINE_CMD_DEFN *ENGINE_get_cmd_defns(const ENGINE *);\nEVP_PKEY *ENGINE_load_private_key(ENGINE *, const char *, UI_METHOD *, void *);\nEVP_PKEY *ENGINE_load_public_key(ENGINE *, const char *, UI_METHOD *, void *);\nvoid ENGINE_add_conf_module(void);\n\"\"\"\n\nMACROS = \"\"\"\n\"\"\"\n\nCUSTOMIZATIONS = \"\"\"\n\"\"\"\n\nCONDITIONAL_NAMES = {}\n", "path": "cryptography/hazmat/bindings/openssl/engine.py"}]} | 2,363 | 189 |
gh_patches_debug_20380 | rasdani/github-patches | git_diff | Pylons__pyramid-952 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spaces are no longer allowed in Mako template paths
As of Pyramid 1.4 spaces are no longer accepted in the path names when rendering mako templates (changed in this commit: https://github.com/Pylons/pyramid/commit/f71ed59edb74e9a13362521918e2660e4e4263ba#L1L79). Since no one else has noticed yet I don't know if this needs a bug fix, but it should at least be documented that the file path cannot have spaces.
For example a renderer argument to view config such as the following would fail when looking up the template:
@view_config(renderer='/templates/test space/mytemplate.mako')
</issue>
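A small reproduction sketch (hypothetical; it quotes the 1.4 regex from `mako_templating.py` below and shows one possible space-tolerant parse based on splitting):

```python
import re

name = '/templates/test space/mytemplate.mako'

# The Pyramid 1.4 pattern: the asset character class contains no " ", so the
# match returns None and the subsequent .group() call fails with an
# AttributeError.
p = re.compile(
    r'(?P<asset>[\w_.:/-]+)'
    r'(?:\#(?P<defname>[\w_]+))?'
    r'(\.(?P<ext>.*))'
)
print(p.match(name))  # None

# One space-tolerant alternative: split on the last "." and an optional "#def".
defname = None
asset, ext = name.rsplit('.', 1)
if '#' in asset:
    asset, defname = asset.rsplit('#', 1)
print(asset, defname, ext)  # /templates/test space/mytemplate None mako
```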
<code>
[start of pyramid/mako_templating.py]
1 import os
2 import posixpath
3 import re
4 import sys
5 import threading
6
7 from zope.interface import (
8 implementer,
9 Interface,
10 )
11
12 from pyramid.asset import (
13 resolve_asset_spec,
14 abspath_from_asset_spec,
15 )
16
17 from pyramid.compat import (
18 is_nonstr_iter,
19 reraise,
20 )
21
22 from pyramid.interfaces import ITemplateRenderer
23 from pyramid.settings import asbool
24 from pyramid.util import DottedNameResolver
25
26 from mako.lookup import TemplateLookup
27 from mako import exceptions
28
29 class IMakoLookup(Interface):
30 pass
31
32 class PkgResourceTemplateLookup(TemplateLookup):
33 """TemplateLookup subclass that handles asset specification URIs"""
34 def adjust_uri(self, uri, relativeto):
35 """Called from within a Mako template, avoids adjusting the
36 uri if it looks like an asset specification"""
37 # Don't adjust asset spec names
38 isabs = os.path.isabs(uri)
39 if (not isabs) and (':' in uri):
40 return uri
41 if not(isabs) and ('$' in uri):
42 return uri.replace('$', ':')
43 if relativeto is not None:
44 relativeto = relativeto.replace('$', ':')
45 if not(':' in uri) and (':' in relativeto):
46 if uri.startswith('/'):
47 return uri
48 pkg, relto = relativeto.split(':')
49 _uri = posixpath.join(posixpath.dirname(relto), uri)
50 return '{0}:{1}'.format(pkg, _uri)
51 if not(':' in uri) and not(':' in relativeto):
52 return posixpath.join(posixpath.dirname(relativeto), uri)
53 return TemplateLookup.adjust_uri(self, uri, relativeto)
54
55 def get_template(self, uri):
56 """Fetch a template from the cache, or check the filesystem
57 for it
58
59 In addition to the basic filesystem lookup, this subclass will
60 use pkg_resource to load a file using the asset
61 specification syntax.
62
63 """
64 isabs = os.path.isabs(uri)
65 if (not isabs) and (':' in uri):
66 # Windows can't cope with colons in filenames, so we replace the
67 # colon with a dollar sign in the filename mako uses to actually
68 # store the generated python code in the mako module_directory or
69 # in the temporary location of mako's modules
70 adjusted = uri.replace(':', '$')
71 try:
72 if self.filesystem_checks:
73 return self._check(adjusted, self._collection[adjusted])
74 else:
75 return self._collection[adjusted]
76 except KeyError:
77 pname, path = resolve_asset_spec(uri)
78 srcfile = abspath_from_asset_spec(path, pname)
79 if os.path.isfile(srcfile):
80 return self._load(srcfile, adjusted)
81 raise exceptions.TopLevelLookupException(
82 "Can not locate template for uri %r" % uri)
83 return TemplateLookup.get_template(self, uri)
84
85
86 registry_lock = threading.Lock()
87
88 class MakoRendererFactoryHelper(object):
89 def __init__(self, settings_prefix=None):
90 self.settings_prefix = settings_prefix
91
92 def __call__(self, info):
93 p = re.compile(
94 r'(?P<asset>[\w_.:/-]+)'
95 r'(?:\#(?P<defname>[\w_]+))?'
96 r'(\.(?P<ext>.*))'
97 )
98 asset, defname, ext = p.match(info.name).group(
99 'asset', 'defname', 'ext'
100 )
101 path = '%s.%s' % (asset, ext)
102 registry = info.registry
103 settings = info.settings
104 settings_prefix = self.settings_prefix
105
106 if settings_prefix is None:
107 settings_prefix = info.type +'.'
108
109 lookup = registry.queryUtility(IMakoLookup, name=settings_prefix)
110
111 def sget(name, default=None):
112 return settings.get(settings_prefix + name, default)
113
114 if lookup is None:
115 reload_templates = settings.get('pyramid.reload_templates', None)
116 if reload_templates is None:
117 reload_templates = settings.get('reload_templates', False)
118 reload_templates = asbool(reload_templates)
119 directories = sget('directories', [])
120 module_directory = sget('module_directory', None)
121 input_encoding = sget('input_encoding', 'utf-8')
122 error_handler = sget('error_handler', None)
123 default_filters = sget('default_filters', 'h')
124 imports = sget('imports', None)
125 strict_undefined = asbool(sget('strict_undefined', False))
126 preprocessor = sget('preprocessor', None)
127 if not is_nonstr_iter(directories):
128 directories = list(filter(None, directories.splitlines()))
129 directories = [ abspath_from_asset_spec(d) for d in directories ]
130 if module_directory is not None:
131 module_directory = abspath_from_asset_spec(module_directory)
132 if error_handler is not None:
133 dotted = DottedNameResolver(info.package)
134 error_handler = dotted.maybe_resolve(error_handler)
135 if default_filters is not None:
136 if not is_nonstr_iter(default_filters):
137 default_filters = list(filter(
138 None, default_filters.splitlines()))
139 if imports is not None:
140 if not is_nonstr_iter(imports):
141 imports = list(filter(None, imports.splitlines()))
142 if preprocessor is not None:
143 dotted = DottedNameResolver(info.package)
144 preprocessor = dotted.maybe_resolve(preprocessor)
145
146
147 lookup = PkgResourceTemplateLookup(
148 directories=directories,
149 module_directory=module_directory,
150 input_encoding=input_encoding,
151 error_handler=error_handler,
152 default_filters=default_filters,
153 imports=imports,
154 filesystem_checks=reload_templates,
155 strict_undefined=strict_undefined,
156 preprocessor=preprocessor
157 )
158
159 with registry_lock:
160 registry.registerUtility(lookup, IMakoLookup,
161 name=settings_prefix)
162
163 return MakoLookupTemplateRenderer(path, defname, lookup)
164
165 renderer_factory = MakoRendererFactoryHelper('mako.')
166
167 class MakoRenderingException(Exception):
168 def __init__(self, text):
169 self.text = text
170
171 def __repr__(self):
172 return self.text
173
174 __str__ = __repr__
175
176 @implementer(ITemplateRenderer)
177 class MakoLookupTemplateRenderer(object):
178 """ Render a :term:`Mako` template using the template
179 implied by the ``path`` argument.The ``path`` argument may be a
180 package-relative path, an absolute path, or a :term:`asset
181 specification`. If a defname is defined, in the form of
182 package:path/to/template#defname.mako, a function named ``defname``
183 inside the template will then be rendered.
184 """
185 def __init__(self, path, defname, lookup):
186 self.path = path
187 self.defname = defname
188 self.lookup = lookup
189
190 def implementation(self):
191 return self.lookup.get_template(self.path)
192
193 def __call__(self, value, system):
194 context = system.pop('context', None)
195 if context is not None:
196 system['_context'] = context
197 if self.defname is None:
198 if isinstance(value, tuple):
199 self.defname, value = value
200 else:
201 if isinstance(value, tuple):
202 _, value = value
203 try:
204 system.update(value)
205 except (TypeError, ValueError):
206 raise ValueError('renderer was passed non-dictionary as value')
207 template = self.implementation()
208 if self.defname is not None:
209 template = template.get_def(self.defname)
210 try:
211 result = template.render_unicode(**system)
212 except:
213 try:
214 exc_info = sys.exc_info()
215 errtext = exceptions.text_error_template().render(
216 error=exc_info[1],
217 traceback=exc_info[2]
218 )
219 reraise(MakoRenderingException(errtext), None, exc_info[2])
220 finally:
221 del exc_info
222
223 return result
224
[end of pyramid/mako_templating.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pyramid/mako_templating.py b/pyramid/mako_templating.py
--- a/pyramid/mako_templating.py
+++ b/pyramid/mako_templating.py
@@ -82,7 +82,6 @@
"Can not locate template for uri %r" % uri)
return TemplateLookup.get_template(self, uri)
-
registry_lock = threading.Lock()
class MakoRendererFactoryHelper(object):
@@ -90,14 +89,11 @@
self.settings_prefix = settings_prefix
def __call__(self, info):
- p = re.compile(
- r'(?P<asset>[\w_.:/-]+)'
- r'(?:\#(?P<defname>[\w_]+))?'
- r'(\.(?P<ext>.*))'
- )
- asset, defname, ext = p.match(info.name).group(
- 'asset', 'defname', 'ext'
- )
+ defname = None
+ asset, ext = info.name.rsplit('.', 1)
+ if '#' in asset:
+ asset, defname = asset.rsplit('#', 1)
+
path = '%s.%s' % (asset, ext)
registry = info.registry
settings = info.settings
| {"golden_diff": "diff --git a/pyramid/mako_templating.py b/pyramid/mako_templating.py\n--- a/pyramid/mako_templating.py\n+++ b/pyramid/mako_templating.py\n@@ -82,7 +82,6 @@\n \"Can not locate template for uri %r\" % uri)\n return TemplateLookup.get_template(self, uri)\n \n-\n registry_lock = threading.Lock()\n \n class MakoRendererFactoryHelper(object):\n@@ -90,14 +89,11 @@\n self.settings_prefix = settings_prefix\n \n def __call__(self, info):\n- p = re.compile(\n- r'(?P<asset>[\\w_.:/-]+)'\n- r'(?:\\#(?P<defname>[\\w_]+))?'\n- r'(\\.(?P<ext>.*))'\n- )\n- asset, defname, ext = p.match(info.name).group(\n- 'asset', 'defname', 'ext'\n- )\n+ defname = None\n+ asset, ext = info.name.rsplit('.', 1)\n+ if '#' in asset:\n+ asset, defname = asset.rsplit('#', 1)\n+\n path = '%s.%s' % (asset, ext)\n registry = info.registry\n settings = info.settings\n", "issue": "Spaces are no longer allowed in Mako template paths\nAs of Pyramid 1.4 spaces are no longer accepted in the path names when rendering mako templates (changed in this commit: https://github.com/Pylons/pyramid/commit/f71ed59edb74e9a13362521918e2660e4e4263ba#L1L79). Since no one else has noticed yet I don't know if this needs a bug fix, but it should at least be documented that the file path cannot have spaces. \n\nFor example a renderer argument to view config such as the following would fail when looking up the template:\n@view_config(renderer='/templates/test space/mytemplate.mako') \n\n", "before_files": [{"content": "import os\nimport posixpath\nimport re\nimport sys\nimport threading\n\nfrom zope.interface import (\n implementer,\n Interface,\n )\n\nfrom pyramid.asset import (\n resolve_asset_spec,\n abspath_from_asset_spec,\n )\n\nfrom pyramid.compat import (\n is_nonstr_iter,\n reraise,\n )\n\nfrom pyramid.interfaces import ITemplateRenderer\nfrom pyramid.settings import asbool\nfrom pyramid.util import DottedNameResolver\n\nfrom mako.lookup import TemplateLookup\nfrom mako import exceptions\n\nclass IMakoLookup(Interface):\n pass\n\nclass PkgResourceTemplateLookup(TemplateLookup):\n \"\"\"TemplateLookup subclass that handles asset specification URIs\"\"\"\n def adjust_uri(self, uri, relativeto):\n \"\"\"Called from within a Mako template, avoids adjusting the\n uri if it looks like an asset specification\"\"\"\n # Don't adjust asset spec names\n isabs = os.path.isabs(uri)\n if (not isabs) and (':' in uri):\n return uri\n if not(isabs) and ('$' in uri):\n return uri.replace('$', ':')\n if relativeto is not None:\n relativeto = relativeto.replace('$', ':')\n if not(':' in uri) and (':' in relativeto):\n if uri.startswith('/'):\n return uri\n pkg, relto = relativeto.split(':')\n _uri = posixpath.join(posixpath.dirname(relto), uri)\n return '{0}:{1}'.format(pkg, _uri)\n if not(':' in uri) and not(':' in relativeto):\n return posixpath.join(posixpath.dirname(relativeto), uri)\n return TemplateLookup.adjust_uri(self, uri, relativeto)\n\n def get_template(self, uri):\n \"\"\"Fetch a template from the cache, or check the filesystem\n for it\n\n In addition to the basic filesystem lookup, this subclass will\n use pkg_resource to load a file using the asset\n specification syntax.\n\n \"\"\"\n isabs = os.path.isabs(uri)\n if (not isabs) and (':' in uri):\n # Windows can't cope with colons in filenames, so we replace the\n # colon with a dollar sign in the filename mako uses to actually\n # store the generated python code in the mako module_directory or\n # in the temporary location of mako's modules\n adjusted = uri.replace(':', '$')\n 
try:\n if self.filesystem_checks:\n return self._check(adjusted, self._collection[adjusted])\n else:\n return self._collection[adjusted]\n except KeyError:\n pname, path = resolve_asset_spec(uri)\n srcfile = abspath_from_asset_spec(path, pname)\n if os.path.isfile(srcfile):\n return self._load(srcfile, adjusted)\n raise exceptions.TopLevelLookupException(\n \"Can not locate template for uri %r\" % uri)\n return TemplateLookup.get_template(self, uri)\n\n\nregistry_lock = threading.Lock()\n\nclass MakoRendererFactoryHelper(object):\n def __init__(self, settings_prefix=None):\n self.settings_prefix = settings_prefix\n\n def __call__(self, info):\n p = re.compile(\n r'(?P<asset>[\\w_.:/-]+)'\n r'(?:\\#(?P<defname>[\\w_]+))?'\n r'(\\.(?P<ext>.*))'\n )\n asset, defname, ext = p.match(info.name).group(\n 'asset', 'defname', 'ext'\n )\n path = '%s.%s' % (asset, ext)\n registry = info.registry\n settings = info.settings\n settings_prefix = self.settings_prefix\n\n if settings_prefix is None:\n settings_prefix = info.type +'.'\n\n lookup = registry.queryUtility(IMakoLookup, name=settings_prefix)\n\n def sget(name, default=None):\n return settings.get(settings_prefix + name, default)\n\n if lookup is None:\n reload_templates = settings.get('pyramid.reload_templates', None)\n if reload_templates is None:\n reload_templates = settings.get('reload_templates', False)\n reload_templates = asbool(reload_templates)\n directories = sget('directories', [])\n module_directory = sget('module_directory', None)\n input_encoding = sget('input_encoding', 'utf-8')\n error_handler = sget('error_handler', None)\n default_filters = sget('default_filters', 'h')\n imports = sget('imports', None)\n strict_undefined = asbool(sget('strict_undefined', False))\n preprocessor = sget('preprocessor', None)\n if not is_nonstr_iter(directories):\n directories = list(filter(None, directories.splitlines()))\n directories = [ abspath_from_asset_spec(d) for d in directories ]\n if module_directory is not None:\n module_directory = abspath_from_asset_spec(module_directory)\n if error_handler is not None:\n dotted = DottedNameResolver(info.package)\n error_handler = dotted.maybe_resolve(error_handler)\n if default_filters is not None:\n if not is_nonstr_iter(default_filters):\n default_filters = list(filter(\n None, default_filters.splitlines()))\n if imports is not None:\n if not is_nonstr_iter(imports):\n imports = list(filter(None, imports.splitlines()))\n if preprocessor is not None:\n dotted = DottedNameResolver(info.package)\n preprocessor = dotted.maybe_resolve(preprocessor)\n\n\n lookup = PkgResourceTemplateLookup(\n directories=directories,\n module_directory=module_directory,\n input_encoding=input_encoding,\n error_handler=error_handler,\n default_filters=default_filters,\n imports=imports,\n filesystem_checks=reload_templates,\n strict_undefined=strict_undefined,\n preprocessor=preprocessor\n )\n\n with registry_lock:\n registry.registerUtility(lookup, IMakoLookup,\n name=settings_prefix)\n\n return MakoLookupTemplateRenderer(path, defname, lookup)\n\nrenderer_factory = MakoRendererFactoryHelper('mako.')\n\nclass MakoRenderingException(Exception):\n def __init__(self, text):\n self.text = text\n\n def __repr__(self):\n return self.text\n\n __str__ = __repr__\n\n@implementer(ITemplateRenderer)\nclass MakoLookupTemplateRenderer(object):\n \"\"\" Render a :term:`Mako` template using the template\n implied by the ``path`` argument.The ``path`` argument may be a\n package-relative path, an absolute path, or a :term:`asset\n 
specification`. If a defname is defined, in the form of\n package:path/to/template#defname.mako, a function named ``defname``\n inside the template will then be rendered.\n \"\"\"\n def __init__(self, path, defname, lookup):\n self.path = path\n self.defname = defname\n self.lookup = lookup\n\n def implementation(self):\n return self.lookup.get_template(self.path)\n\n def __call__(self, value, system):\n context = system.pop('context', None)\n if context is not None:\n system['_context'] = context\n if self.defname is None:\n if isinstance(value, tuple):\n self.defname, value = value\n else:\n if isinstance(value, tuple):\n _, value = value\n try:\n system.update(value)\n except (TypeError, ValueError):\n raise ValueError('renderer was passed non-dictionary as value')\n template = self.implementation()\n if self.defname is not None:\n template = template.get_def(self.defname)\n try:\n result = template.render_unicode(**system)\n except:\n try:\n exc_info = sys.exc_info()\n errtext = exceptions.text_error_template().render(\n error=exc_info[1],\n traceback=exc_info[2]\n )\n reraise(MakoRenderingException(errtext), None, exc_info[2])\n finally:\n del exc_info\n\n return result\n", "path": "pyramid/mako_templating.py"}]} | 2,972 | 288 |
gh_patches_debug_36052 | rasdani/github-patches | git_diff | DDMAL__CantusDB-627 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Hide certain fields in admin area
Several of our models' fields are designed to improve database operation, or to preserve an archive of past information, and should not be edited by users. These fields include `json_info` (present in several models - i.e. the response from OldCantus from when we last synced the data), `next_chant` (Chant, Sequence), `is_last_chant_in_feast` (Chant, Sequence) `number_of_chants` and `number_of_melodies` (Source); there may be others.
We can `exclude` these fields in our Admin configuration - this shouldn't be too hard to fix.
</issue>
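A minimal sketch of the suggested `exclude` approach (field and model names are taken from the issue; the project's actual admin classes appear in `main_app/admin.py` below):

```python
from django.contrib import admin
from main_app.models import Chant

class ChantAdmin(admin.ModelAdmin):
    # derived/archival fields that should not be hand-edited
    exclude = ("json_info", "next_chant", "is_last_chant_in_feast")

admin.site.register(Chant, ChantAdmin)
```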
<code>
[start of django/cantusdb_project/main_app/models/base_chant.py]
1 from main_app.models import BaseModel
2 from django.db import models
3 from django.contrib.auth import get_user_model
4 from django.contrib.postgres.search import SearchVectorField
5
6
7 class BaseChant(BaseModel):
8 """
9 the Chant and Sequence models inherit from BaseChant.
10
11 Both Chant and Sequence must have the same fields, otherwise errors will be raised
12 when a user searches for chants/sequences in the database. Thus, all fields,
13 properties and attributes should be declared in BaseChant in order to keep the two
14 models harmonized, even if only one of the two models uses a particular field.
15 """
16
17 class Meta:
18 abstract = True
19
20 # The "visible_status" field corresponds to the "status" field on old Cantus
21 visible_status = models.CharField(max_length=1, blank=True, null=True)
22 # For chants, the old Cantus "title" field (in json export) is used to populate the new Cantus "incipit" field,
23 # For sequences, the old Cantus "title" field is used to populate the new Cantus "title" field,
24 # and the old Cantus "incipit" field is used to populate the new Cantus "incipit" field.
25 title = models.CharField(blank=True, null=True, max_length=255)
26 incipit = models.CharField(blank=True, null=True, max_length=255)
27 siglum = models.CharField(blank=True, null=True, max_length=255)
28 folio = models.CharField(
29 help_text="Binding order", blank=True, null=True, max_length=255, db_index=True
30 )
31 # The "s_sequence" char field, used for Sequences, is used to indicate the relative positions of sequences on the page.
32 # It sometimes features non-numeric characters and leading zeroes, so it's a CharField.
33 s_sequence = models.CharField(blank=True, null=True, max_length=255)
34 # The "c_sequence" integer field, used for Chants, similarly indicates the relative positions of chants on the page
35 c_sequence = models.PositiveIntegerField(
36 help_text='Each folio starts with "1"', null=True, blank=True
37 )
38 genre = models.ForeignKey("Genre", blank=True, null=True, on_delete=models.PROTECT)
39 rubrics = models.CharField(blank=True, null=True, max_length=255)
40 analecta_hymnica = models.CharField(blank=True, null=True, max_length=255)
41 indexing_notes = models.TextField(blank=True, null=True)
42 date = models.CharField(blank=True, null=True, max_length=255)
43 col1 = models.CharField(blank=True, null=True, max_length=255)
44 col2 = models.CharField(blank=True, null=True, max_length=255)
45 col3 = models.CharField(blank=True, null=True, max_length=255)
46 ah_volume = models.CharField(blank=True, null=True, max_length=255)
47 # Note that some chants do not have a source
48 source = models.ForeignKey(
49 "Source", on_delete=models.CASCADE, null=True, blank=True
50 ) # PROTECT so that we can't delete a source with chants in it
51 cantus_id = models.CharField(blank=True, null=True, max_length=255, db_index=True)
52 image_link = models.URLField(blank=True, null=True)
53 json_info = models.JSONField(null=True, blank=True)
54 marginalia = models.CharField(max_length=63, null=True, blank=True)
55 office = models.ForeignKey(
56 "Office", on_delete=models.PROTECT, null=True, blank=True
57 )
58 position = models.CharField(max_length=63, null=True, blank=True)
59 feast = models.ForeignKey("Feast", on_delete=models.PROTECT, null=True, blank=True)
60 mode = models.CharField(max_length=63, null=True, blank=True)
61 differentia = models.CharField(blank=True, null=True, max_length=63)
62 differentia_new = models.CharField(blank=True, null=True, max_length=12)
63 finalis = models.CharField(blank=True, null=True, max_length=63)
64 extra = models.CharField(blank=True, null=True, max_length=63)
65 chant_range = models.CharField(
66 blank=True,
67 null=True,
68 help_text='Example: "1-c-k-4". Optional field',
69 max_length=255,
70 )
71 addendum = models.CharField(blank=True, null=True, max_length=255)
72 manuscript_full_text_std_spelling = models.TextField(
73 help_text="Manuscript full text with standardized spelling. Enter the words "
74 "according to the manuscript but normalize their spellings following "
75 "Classical Latin forms. Use upper-case letters for proper nouns, "
76 'the first word of each chant, and the first word after "Alleluia" for '
77 "Mass Alleluias. Punctuation is omitted.",
78 null=True,
79 blank=True,
80 )
81 manuscript_full_text_std_proofread = models.BooleanField(blank=True, null=True)
82 manuscript_full_text = models.TextField(
83 help_text="Enter the wording, word order and spellings as found in the manuscript"
84 ", with abbreviations resolved to standard words. Use upper-case letters as found"
85 " in the source. Retain “Xpistum” (Christum), “Ihc” (Jesus) and other instances of "
86 "Greek characters with their closest approximations of Latin letters. Some punctuation"
87 " signs and vertical dividing lines | are employed in this field. Repetenda and psalm "
88 "cues can also be recorded here.",
89 null=True,
90 blank=True,
91 )
92 manuscript_full_text_proofread = models.BooleanField(blank=True, null=True)
93 manuscript_syllabized_full_text = models.TextField(null=True, blank=True)
94 volpiano = models.TextField(null=True, blank=True)
95 volpiano_proofread = models.BooleanField(blank=True, null=True)
96 # The "volpiano_notes" and "volpiano_intervals" field are added in new Cantus to aid melody search.
97 # "volpiano_notes" is extracted from the "volpiano" field, by eliminating all non-note characters
98 # and removing consecutive repeated notes.
99 # "volpiano_intervals" is extracted from the "volpiano_notes" field.
100 # It records the intervals between any two adjacent volpiano notes.
101 volpiano_notes = models.TextField(null=True, blank=True)
102 volpiano_intervals = models.TextField(null=True, blank=True)
103
104 # NB: the cao_concordances field should not be used in public-facing views, as it contains data that may be out-of-date.
105 # For more information, see https://github.com/DDMAL/CantusDB/wiki/BaseChant-Model
106 cao_concordances = models.CharField(
107 blank=True, null=True, max_length=63
108 ) # !! see lines immediately above
109 proofread_by = models.ManyToManyField(get_user_model(), blank=True)
110 melody_id = models.CharField(blank=True, null=True, max_length=63)
111 search_vector = SearchVectorField(null=True, editable=False)
112 content_structure = models.CharField(
113 blank=True,
114 null=True,
115 max_length=64,
116 help_text="Additional folio number field, if folio numbers appear on the leaves but are not in the 'binding order'.",
117 )
118 next_chant = models.OneToOneField(
119 "self",
120 related_name="prev_chant",
121 null=True,
122 blank=True,
123 on_delete=models.SET_NULL,
124 )
125 # prev_chant = ... # prev_chant is created when next_chant is calculated
126
127 # this field, populated by the populate_is_last_chant_in_feast script, exists in order to optimize .get_suggested_feasts() on the chant-create page
128 is_last_chant_in_feast = models.BooleanField(blank=True, null=True)
129
130 # fragmentarium_id = models.CharField(blank=True, null=True, max_length=64)
131 # # Digital Analysis of Chant Transmission
132 # dact = models.CharField(blank=True, null=True, max_length=64)
133 # also a second differentia field
134
135 def get_ci_url(self) -> str:
136 """Construct the url to the entry in Cantus Index correponding to the chant.
137
138 Returns:
139 str: The url to the Cantus Index page
140 """
141 return f"http://cantusindex.org/id/{self.cantus_id}"
142
143 def __str__(self):
144 incipit = ""
145 if self.incipit:
146 incipit = self.incipit
147 elif self.manuscript_full_text:
148 split_text = self.manuscript_full_text.split()
149 incipit = " ".join(split_text[:4])
150 return '"{incip}" ({id})'.format(incip=incipit, id=self.id)
151
[end of django/cantusdb_project/main_app/models/base_chant.py]
[start of django/cantusdb_project/main_app/admin.py]
1 from django.contrib import admin
2 from main_app.models import *
3
4 # Register your models here.
5
6
7 class BaseModelAdmin(admin.ModelAdmin):
8 # these fields should not be editable
9 exclude = ("created_by", "last_updated_by")
10
11 # if an object is created in the admin interface, assign the user to the created_by field
12 # else if an object is updated in the admin interface, assign the user to the last_updated_by field
13 def save_model(self, request, obj, form, change):
14 if change:
15 obj.last_updated_by = request.user
16 else:
17 obj.created_by = request.user
18 super().save_model(request, obj, form, change)
19
20
21 class CenturyAdmin(BaseModelAdmin):
22 pass
23
24
25 class ChantAdmin(BaseModelAdmin):
26 pass
27
28
29 class FeastAdmin(BaseModelAdmin):
30 pass
31
32
33 class GenreAdmin(BaseModelAdmin):
34 pass
35
36
37 class NotationAdmin(BaseModelAdmin):
38 pass
39
40
41 class OfficeAdmin(BaseModelAdmin):
42 pass
43
44
45 class ProvenanceAdmin(BaseModelAdmin):
46 pass
47
48
49 class RismSiglumAdmin(BaseModelAdmin):
50 pass
51
52
53 class SegmentAdmin(BaseModelAdmin):
54 pass
55
56
57 class SequenceAdmin(BaseModelAdmin):
58 pass
59
60
61 class SourceAdmin(BaseModelAdmin):
62 # from the Django docs:
63 # Adding a ManyToManyField to this list will instead use a nifty unobtrusive JavaScript “filter” interface
64 # that allows searching within the options. The unselected and selected options appear in two boxes side by side.
65 filter_horizontal = (
66 "century",
67 "notation",
68 "current_editors",
69 "inventoried_by",
70 "full_text_entered_by",
71 "melodies_entered_by",
72 "proofreaders",
73 "other_editors",
74 )
75
76
77 admin.site.register(Century, CenturyAdmin)
78 admin.site.register(Chant, ChantAdmin)
79 admin.site.register(Feast, FeastAdmin)
80 admin.site.register(Genre, GenreAdmin)
81 admin.site.register(Notation, NotationAdmin)
82 admin.site.register(Office, OfficeAdmin)
83 admin.site.register(Provenance, ProvenanceAdmin)
84 admin.site.register(RismSiglum, RismSiglumAdmin)
85 admin.site.register(Segment, SegmentAdmin)
86 admin.site.register(Sequence, SequenceAdmin)
87 admin.site.register(Source, SourceAdmin)
88
[end of django/cantusdb_project/main_app/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/django/cantusdb_project/main_app/admin.py b/django/cantusdb_project/main_app/admin.py
--- a/django/cantusdb_project/main_app/admin.py
+++ b/django/cantusdb_project/main_app/admin.py
@@ -1,12 +1,11 @@
from django.contrib import admin
from main_app.models import *
-# Register your models here.
-
+# these fields should not be editable by all classes
+EXCLUDE = ("created_by", "last_updated_by", "json_info")
class BaseModelAdmin(admin.ModelAdmin):
- # these fields should not be editable
- exclude = ("created_by", "last_updated_by")
+ exclude = EXCLUDE
# if an object is created in the admin interface, assign the user to the created_by field
# else if an object is updated in the admin interface, assign the user to the last_updated_by field
@@ -23,8 +22,7 @@
class ChantAdmin(BaseModelAdmin):
- pass
-
+ exclude = EXCLUDE + ("col1", "col2", "col3", "next_chant", "s_sequence", "is_last_chant_in_feast")
class FeastAdmin(BaseModelAdmin):
pass
@@ -55,8 +53,7 @@
class SequenceAdmin(BaseModelAdmin):
- pass
-
+ exclude = EXCLUDE + ("c_sequence", "next_chant", "is_last_chant_in_feast")
class SourceAdmin(BaseModelAdmin):
# from the Django docs:
diff --git a/django/cantusdb_project/main_app/models/base_chant.py b/django/cantusdb_project/main_app/models/base_chant.py
--- a/django/cantusdb_project/main_app/models/base_chant.py
+++ b/django/cantusdb_project/main_app/models/base_chant.py
@@ -30,9 +30,9 @@
)
# The "s_sequence" char field, used for Sequences, is used to indicate the relative positions of sequences on the page.
# It sometimes features non-numeric characters and leading zeroes, so it's a CharField.
- s_sequence = models.CharField(blank=True, null=True, max_length=255)
+ s_sequence = models.CharField("Sequence", blank=True, null=True, max_length=255)
# The "c_sequence" integer field, used for Chants, similarly indicates the relative positions of chants on the page
- c_sequence = models.PositiveIntegerField(
+ c_sequence = models.PositiveIntegerField("Sequence",
help_text='Each folio starts with "1"', null=True, blank=True
)
genre = models.ForeignKey("Genre", blank=True, null=True, on_delete=models.PROTECT)
| {"golden_diff": "diff --git a/django/cantusdb_project/main_app/admin.py b/django/cantusdb_project/main_app/admin.py\n--- a/django/cantusdb_project/main_app/admin.py\n+++ b/django/cantusdb_project/main_app/admin.py\n@@ -1,12 +1,11 @@\n from django.contrib import admin\n from main_app.models import *\n \n-# Register your models here.\n-\n+# these fields should not be editable by all classes\n+EXCLUDE = (\"created_by\", \"last_updated_by\", \"json_info\")\n \n class BaseModelAdmin(admin.ModelAdmin):\n- # these fields should not be editable\n- exclude = (\"created_by\", \"last_updated_by\")\n+ exclude = EXCLUDE\n \n # if an object is created in the admin interface, assign the user to the created_by field\n # else if an object is updated in the admin interface, assign the user to the last_updated_by field\n@@ -23,8 +22,7 @@\n \n \n class ChantAdmin(BaseModelAdmin):\n- pass\n-\n+ exclude = EXCLUDE + (\"col1\", \"col2\", \"col3\", \"next_chant\", \"s_sequence\", \"is_last_chant_in_feast\")\n \n class FeastAdmin(BaseModelAdmin):\n pass\n@@ -55,8 +53,7 @@\n \n \n class SequenceAdmin(BaseModelAdmin):\n- pass\n-\n+ exclude = EXCLUDE + (\"c_sequence\", \"next_chant\", \"is_last_chant_in_feast\")\n \n class SourceAdmin(BaseModelAdmin):\n # from the Django docs:\ndiff --git a/django/cantusdb_project/main_app/models/base_chant.py b/django/cantusdb_project/main_app/models/base_chant.py\n--- a/django/cantusdb_project/main_app/models/base_chant.py\n+++ b/django/cantusdb_project/main_app/models/base_chant.py\n@@ -30,9 +30,9 @@\n )\n # The \"s_sequence\" char field, used for Sequences, is used to indicate the relative positions of sequences on the page.\n # It sometimes features non-numeric characters and leading zeroes, so it's a CharField.\n- s_sequence = models.CharField(blank=True, null=True, max_length=255)\n+ s_sequence = models.CharField(\"Sequence\", blank=True, null=True, max_length=255)\n # The \"c_sequence\" integer field, used for Chants, similarly indicates the relative positions of chants on the page\n- c_sequence = models.PositiveIntegerField(\n+ c_sequence = models.PositiveIntegerField(\"Sequence\",\n help_text='Each folio starts with \"1\"', null=True, blank=True\n )\n genre = models.ForeignKey(\"Genre\", blank=True, null=True, on_delete=models.PROTECT)\n", "issue": "Hide certain fields in admin area\nSeveral of our models' fields are designed to improve database operation, or to preserve an archive of past information, and should not be edited by users. These fields include `json_info` (present in several models - i.e. the response from OldCantus from when we last synced the data), `next_chant` (Chant, Sequence), `is_last_chant_in_feast` (Chant, Sequence) `number_of_chants` and `number_of_melodies` (Source); there may be others.\r\n\r\nWe can `exclude` these fields in our Admin configuration - this shouldn't be too hard to fix.\nHide certain fields in admin area\nSeveral of our models' fields are designed to improve database operation, or to preserve an archive of past information, and should not be edited by users. These fields include `json_info` (present in several models - i.e. 
the response from OldCantus from when we last synced the data), `next_chant` (Chant, Sequence), `is_last_chant_in_feast` (Chant, Sequence) `number_of_chants` and `number_of_melodies` (Source); there may be others.\r\n\r\nWe can `exclude` these fields in our Admin configuration - this shouldn't be too hard to fix.\n", "before_files": [{"content": "from main_app.models import BaseModel\nfrom django.db import models\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.postgres.search import SearchVectorField\n\n\nclass BaseChant(BaseModel):\n \"\"\"\n the Chant and Sequence models inherit from BaseChant.\n\n Both Chant and Sequence must have the same fields, otherwise errors will be raised\n when a user searches for chants/sequences in the database. Thus, all fields,\n properties and attributes should be declared in BaseChant in order to keep the two\n models harmonized, even if only one of the two models uses a particular field.\n \"\"\"\n\n class Meta:\n abstract = True\n\n # The \"visible_status\" field corresponds to the \"status\" field on old Cantus\n visible_status = models.CharField(max_length=1, blank=True, null=True)\n # For chants, the old Cantus \"title\" field (in json export) is used to populate the new Cantus \"incipit\" field,\n # For sequences, the old Cantus \"title\" field is used to populate the new Cantus \"title\" field,\n # and the old Cantus \"incipit\" field is used to populate the new Cantus \"incipit\" field.\n title = models.CharField(blank=True, null=True, max_length=255)\n incipit = models.CharField(blank=True, null=True, max_length=255)\n siglum = models.CharField(blank=True, null=True, max_length=255)\n folio = models.CharField(\n help_text=\"Binding order\", blank=True, null=True, max_length=255, db_index=True\n )\n # The \"s_sequence\" char field, used for Sequences, is used to indicate the relative positions of sequences on the page.\n # It sometimes features non-numeric characters and leading zeroes, so it's a CharField.\n s_sequence = models.CharField(blank=True, null=True, max_length=255)\n # The \"c_sequence\" integer field, used for Chants, similarly indicates the relative positions of chants on the page\n c_sequence = models.PositiveIntegerField(\n help_text='Each folio starts with \"1\"', null=True, blank=True\n )\n genre = models.ForeignKey(\"Genre\", blank=True, null=True, on_delete=models.PROTECT)\n rubrics = models.CharField(blank=True, null=True, max_length=255)\n analecta_hymnica = models.CharField(blank=True, null=True, max_length=255)\n indexing_notes = models.TextField(blank=True, null=True)\n date = models.CharField(blank=True, null=True, max_length=255)\n col1 = models.CharField(blank=True, null=True, max_length=255)\n col2 = models.CharField(blank=True, null=True, max_length=255)\n col3 = models.CharField(blank=True, null=True, max_length=255)\n ah_volume = models.CharField(blank=True, null=True, max_length=255)\n # Note that some chants do not have a source\n source = models.ForeignKey(\n \"Source\", on_delete=models.CASCADE, null=True, blank=True\n ) # PROTECT so that we can't delete a source with chants in it\n cantus_id = models.CharField(blank=True, null=True, max_length=255, db_index=True)\n image_link = models.URLField(blank=True, null=True)\n json_info = models.JSONField(null=True, blank=True)\n marginalia = models.CharField(max_length=63, null=True, blank=True)\n office = models.ForeignKey(\n \"Office\", on_delete=models.PROTECT, null=True, blank=True\n )\n position = models.CharField(max_length=63, null=True, 
blank=True)\n feast = models.ForeignKey(\"Feast\", on_delete=models.PROTECT, null=True, blank=True)\n mode = models.CharField(max_length=63, null=True, blank=True)\n differentia = models.CharField(blank=True, null=True, max_length=63)\n differentia_new = models.CharField(blank=True, null=True, max_length=12)\n finalis = models.CharField(blank=True, null=True, max_length=63)\n extra = models.CharField(blank=True, null=True, max_length=63)\n chant_range = models.CharField(\n blank=True,\n null=True,\n help_text='Example: \"1-c-k-4\". Optional field',\n max_length=255,\n )\n addendum = models.CharField(blank=True, null=True, max_length=255)\n manuscript_full_text_std_spelling = models.TextField(\n help_text=\"Manuscript full text with standardized spelling. Enter the words \"\n \"according to the manuscript but normalize their spellings following \"\n \"Classical Latin forms. Use upper-case letters for proper nouns, \"\n 'the first word of each chant, and the first word after \"Alleluia\" for '\n \"Mass Alleluias. Punctuation is omitted.\",\n null=True,\n blank=True,\n )\n manuscript_full_text_std_proofread = models.BooleanField(blank=True, null=True)\n manuscript_full_text = models.TextField(\n help_text=\"Enter the wording, word order and spellings as found in the manuscript\"\n \", with abbreviations resolved to standard words. Use upper-case letters as found\"\n \" in the source. Retain \u201cXpistum\u201d (Christum), \u201cIhc\u201d (Jesus) and other instances of \"\n \"Greek characters with their closest approximations of Latin letters. Some punctuation\"\n \" signs and vertical dividing lines | are employed in this field. Repetenda and psalm \"\n \"cues can also be recorded here.\",\n null=True,\n blank=True,\n )\n manuscript_full_text_proofread = models.BooleanField(blank=True, null=True)\n manuscript_syllabized_full_text = models.TextField(null=True, blank=True)\n volpiano = models.TextField(null=True, blank=True)\n volpiano_proofread = models.BooleanField(blank=True, null=True)\n # The \"volpiano_notes\" and \"volpiano_intervals\" field are added in new Cantus to aid melody search.\n # \"volpiano_notes\" is extracted from the \"volpiano\" field, by eliminating all non-note characters\n # and removing consecutive repeated notes.\n # \"volpiano_intervals\" is extracted from the \"volpiano_notes\" field.\n # It records the intervals between any two adjacent volpiano notes.\n volpiano_notes = models.TextField(null=True, blank=True)\n volpiano_intervals = models.TextField(null=True, blank=True)\n\n # NB: the cao_concordances field should not be used in public-facing views, as it contains data that may be out-of-date.\n # For more information, see https://github.com/DDMAL/CantusDB/wiki/BaseChant-Model\n cao_concordances = models.CharField(\n blank=True, null=True, max_length=63\n ) # !! see lines immediately above\n proofread_by = models.ManyToManyField(get_user_model(), blank=True)\n melody_id = models.CharField(blank=True, null=True, max_length=63)\n search_vector = SearchVectorField(null=True, editable=False)\n content_structure = models.CharField(\n blank=True,\n null=True,\n max_length=64,\n help_text=\"Additional folio number field, if folio numbers appear on the leaves but are not in the 'binding order'.\",\n )\n next_chant = models.OneToOneField(\n \"self\",\n related_name=\"prev_chant\",\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n )\n # prev_chant = ... 
# prev_chant is created when next_chant is calculated\n\n # this field, populated by the populate_is_last_chant_in_feast script, exists in order to optimize .get_suggested_feasts() on the chant-create page\n is_last_chant_in_feast = models.BooleanField(blank=True, null=True)\n\n # fragmentarium_id = models.CharField(blank=True, null=True, max_length=64)\n # # Digital Analysis of Chant Transmission\n # dact = models.CharField(blank=True, null=True, max_length=64)\n # also a second differentia field\n\n def get_ci_url(self) -> str:\n \"\"\"Construct the url to the entry in Cantus Index correponding to the chant.\n\n Returns:\n str: The url to the Cantus Index page\n \"\"\"\n return f\"http://cantusindex.org/id/{self.cantus_id}\"\n\n def __str__(self):\n incipit = \"\"\n if self.incipit:\n incipit = self.incipit\n elif self.manuscript_full_text:\n split_text = self.manuscript_full_text.split()\n incipit = \" \".join(split_text[:4])\n return '\"{incip}\" ({id})'.format(incip=incipit, id=self.id)\n", "path": "django/cantusdb_project/main_app/models/base_chant.py"}, {"content": "from django.contrib import admin\nfrom main_app.models import *\n\n# Register your models here.\n\n\nclass BaseModelAdmin(admin.ModelAdmin):\n # these fields should not be editable\n exclude = (\"created_by\", \"last_updated_by\")\n\n # if an object is created in the admin interface, assign the user to the created_by field\n # else if an object is updated in the admin interface, assign the user to the last_updated_by field\n def save_model(self, request, obj, form, change):\n if change:\n obj.last_updated_by = request.user\n else:\n obj.created_by = request.user\n super().save_model(request, obj, form, change)\n\n\nclass CenturyAdmin(BaseModelAdmin):\n pass\n\n\nclass ChantAdmin(BaseModelAdmin):\n pass\n\n\nclass FeastAdmin(BaseModelAdmin):\n pass\n\n\nclass GenreAdmin(BaseModelAdmin):\n pass\n\n\nclass NotationAdmin(BaseModelAdmin):\n pass\n\n\nclass OfficeAdmin(BaseModelAdmin):\n pass\n\n\nclass ProvenanceAdmin(BaseModelAdmin):\n pass\n\n\nclass RismSiglumAdmin(BaseModelAdmin):\n pass\n\n\nclass SegmentAdmin(BaseModelAdmin):\n pass\n\n\nclass SequenceAdmin(BaseModelAdmin):\n pass\n\n\nclass SourceAdmin(BaseModelAdmin):\n # from the Django docs:\n # Adding a ManyToManyField to this list will instead use a nifty unobtrusive JavaScript \u201cfilter\u201d interface\n # that allows searching within the options. The unselected and selected options appear in two boxes side by side.\n filter_horizontal = (\n \"century\",\n \"notation\",\n \"current_editors\",\n \"inventoried_by\",\n \"full_text_entered_by\",\n \"melodies_entered_by\",\n \"proofreaders\",\n \"other_editors\",\n )\n\n\nadmin.site.register(Century, CenturyAdmin)\nadmin.site.register(Chant, ChantAdmin)\nadmin.site.register(Feast, FeastAdmin)\nadmin.site.register(Genre, GenreAdmin)\nadmin.site.register(Notation, NotationAdmin)\nadmin.site.register(Office, OfficeAdmin)\nadmin.site.register(Provenance, ProvenanceAdmin)\nadmin.site.register(RismSiglum, RismSiglumAdmin)\nadmin.site.register(Segment, SegmentAdmin)\nadmin.site.register(Sequence, SequenceAdmin)\nadmin.site.register(Source, SourceAdmin)\n", "path": "django/cantusdb_project/main_app/admin.py"}]} | 3,795 | 596 |
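
The fix this row points at is small in practice: the machine-managed fields it lists just need to be added to `exclude` on the relevant `ModelAdmin` subclasses shipped in `admin.py`. A minimal sketch, not the project's actual patch; the field-to-admin assignments follow the issue text as far as it is visible here, and the `BaseModelAdmin.exclude + (...)` idiom is an assumption about how one might keep the inherited exclusions:

```python
# Sketch only -- extend the inherited exclude tuple on the admins named above.
class ChantAdmin(BaseModelAdmin):
    exclude = BaseModelAdmin.exclude + ("json_info", "next_chant", "is_last_chant_in_feast")


class SequenceAdmin(BaseModelAdmin):
    exclude = BaseModelAdmin.exclude + ("json_info", "next_chant", "is_last_chant_in_feast")


class SourceAdmin(BaseModelAdmin):
    # number_of_chants / number_of_melodies are the Source-level fields the issue mentions.
    exclude = BaseModelAdmin.exclude + ("number_of_chants", "number_of_melodies")
```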
gh_patches_debug_3810 | rasdani/github-patches | git_diff | iterative__dvc-3129 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
dvc: fix version generation
Looks like dynamic version got broken https://travis-ci.com/iterative/dvc/jobs/274986530 .
</issue>
<code>
[start of dvc/version.py]
1 # Used in setup.py, so don't pull any additional dependencies
2 #
3 # Based on:
4 # - https://github.com/python/mypy/blob/master/mypy/version.py
5 # - https://github.com/python/mypy/blob/master/mypy/git.py
6 import os
7 import subprocess
8
9
10 _BASE_VERSION = "0.81.0"
11
12
13 def _generate_version(base_version):
14 """Generate a version with information about the git repository"""
15 pkg_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
16
17 if not _is_git_repo(pkg_dir) or not _have_git():
18 return base_version
19
20 if _is_release(pkg_dir, base_version) and not _is_dirty(pkg_dir):
21 return base_version
22
23 return "{base_version}+{short_sha}{dirty}".format(
24 base_version=base_version,
25 short_sha=_git_revision(pkg_dir).decode("utf-8")[0:6],
26 dirty=".mod" if _is_dirty(pkg_dir) else "",
27 )
28
29
30 def _is_git_repo(dir_path):
31 """Is the given directory version-controlled with git?"""
32 return os.path.exists(os.path.join(dir_path, ".git"))
33
34
35 def _have_git():
36 """Can we run the git executable?"""
37 try:
38 subprocess.check_output(["git", "--help"])
39 return True
40 except subprocess.CalledProcessError:
41 return False
42 except OSError:
43 return False
44
45
46 def _is_release(dir_path, base_version):
47 try:
48 output = subprocess.check_output(
49 ["git", "describe", "--tags", "--exact-match"],
50 cwd=dir_path,
51 stderr=subprocess.STDOUT,
52 )
53 tag = output.strip()
54 return tag == base_version
55 except subprocess.CalledProcessError:
56 return False
57
58
59 def _git_revision(dir_path):
60 """Get the SHA-1 of the HEAD of a git repository."""
61 return subprocess.check_output(
62 ["git", "rev-parse", "HEAD"], cwd=dir_path
63 ).strip()
64
65
66 def _is_dirty(dir_path):
67 """Check whether a git repository has uncommitted changes."""
68 try:
69 subprocess.check_call(["git", "diff", "--quiet"], cwd=dir_path)
70 return False
71 except subprocess.CalledProcessError:
72 return True
73
74
75 __version__ = _generate_version(_BASE_VERSION)
76
[end of dvc/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dvc/version.py b/dvc/version.py
--- a/dvc/version.py
+++ b/dvc/version.py
@@ -49,7 +49,7 @@
["git", "describe", "--tags", "--exact-match"],
cwd=dir_path,
stderr=subprocess.STDOUT,
- )
+ ).decode("utf-8")
tag = output.strip()
return tag == base_version
except subprocess.CalledProcessError:
| {"golden_diff": "diff --git a/dvc/version.py b/dvc/version.py\n--- a/dvc/version.py\n+++ b/dvc/version.py\n@@ -49,7 +49,7 @@\n [\"git\", \"describe\", \"--tags\", \"--exact-match\"],\n cwd=dir_path,\n stderr=subprocess.STDOUT,\n- )\n+ ).decode(\"utf-8\")\n tag = output.strip()\n return tag == base_version\n except subprocess.CalledProcessError:\n", "issue": "dvc: fix version generatio\nLooks like dynamic version got broken https://travis-ci.com/iterative/dvc/jobs/274986530 .\r\n\n", "before_files": [{"content": "# Used in setup.py, so don't pull any additional dependencies\n#\n# Based on:\n# - https://github.com/python/mypy/blob/master/mypy/version.py\n# - https://github.com/python/mypy/blob/master/mypy/git.py\nimport os\nimport subprocess\n\n\n_BASE_VERSION = \"0.81.0\"\n\n\ndef _generate_version(base_version):\n \"\"\"Generate a version with information about the git repository\"\"\"\n pkg_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n if not _is_git_repo(pkg_dir) or not _have_git():\n return base_version\n\n if _is_release(pkg_dir, base_version) and not _is_dirty(pkg_dir):\n return base_version\n\n return \"{base_version}+{short_sha}{dirty}\".format(\n base_version=base_version,\n short_sha=_git_revision(pkg_dir).decode(\"utf-8\")[0:6],\n dirty=\".mod\" if _is_dirty(pkg_dir) else \"\",\n )\n\n\ndef _is_git_repo(dir_path):\n \"\"\"Is the given directory version-controlled with git?\"\"\"\n return os.path.exists(os.path.join(dir_path, \".git\"))\n\n\ndef _have_git():\n \"\"\"Can we run the git executable?\"\"\"\n try:\n subprocess.check_output([\"git\", \"--help\"])\n return True\n except subprocess.CalledProcessError:\n return False\n except OSError:\n return False\n\n\ndef _is_release(dir_path, base_version):\n try:\n output = subprocess.check_output(\n [\"git\", \"describe\", \"--tags\", \"--exact-match\"],\n cwd=dir_path,\n stderr=subprocess.STDOUT,\n )\n tag = output.strip()\n return tag == base_version\n except subprocess.CalledProcessError:\n return False\n\n\ndef _git_revision(dir_path):\n \"\"\"Get the SHA-1 of the HEAD of a git repository.\"\"\"\n return subprocess.check_output(\n [\"git\", \"rev-parse\", \"HEAD\"], cwd=dir_path\n ).strip()\n\n\ndef _is_dirty(dir_path):\n \"\"\"Check whether a git repository has uncommitted changes.\"\"\"\n try:\n subprocess.check_call([\"git\", \"diff\", \"--quiet\"], cwd=dir_path)\n return False\n except subprocess.CalledProcessError:\n return True\n\n\n__version__ = _generate_version(_BASE_VERSION)\n", "path": "dvc/version.py"}]} | 1,219 | 100 |
gh_patches_debug_13402 | rasdani/github-patches | git_diff | ytdl-org__youtube-dl-10971 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
openload.co extractor not working
youtube-dl --get-url --verbose https://openload.co/embed/kUEfGclsU9o/
[debug] System config: []
[debug] User config: []
[debug] Command-line args: [u'--get-url', u'--verbose', u'https://openload.co/embed/kUEfGclsU9o/']
[debug] Encodings: locale UTF-8, fs UTF-8, out UTF-8, pref UTF-8
[debug] youtube-dl version 2016.08.22
[debug] Python version 2.6.6 - Linux-2.6.32-642.1.1.el6.x86_64-x86_64-with-centos-6.8-Final
[debug] exe versions: ffmpeg 0.6.5, ffprobe 0.6.5
[debug] Proxy map: {}
ERROR: Unable to extract link image; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.
Traceback (most recent call last):
File "/usr/local/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 691, in extract_info
ie_result = ie.extract(url)
File "/usr/local/bin/youtube-dl/youtube_dl/extractor/common.py", line 347, in extract
return self._real_extract(url)
File "/usr/local/bin/youtube-dl/youtube_dl/extractor/openload.py", line 62, in _real_extract
r'<img[^>]+id="linkimg"[^>]+src="([^"]+)"', webpage, 'link image')
File "/usr/local/bin/youtube-dl/youtube_dl/extractor/common.py", line 650, in _search_regex
raise RegexNotFoundError('Unable to extract %s' % _name)
RegexNotFoundError: Unable to extract link image; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.
</issue>
<code>
[start of youtube_dl/extractor/openload.py]
1 # coding: utf-8
2 from __future__ import unicode_literals, division
3
4 from .common import InfoExtractor
5 from ..compat import (
6 compat_chr,
7 compat_ord,
8 )
9 from ..utils import (
10 determine_ext,
11 ExtractorError,
12 )
13
14
15 class OpenloadIE(InfoExtractor):
16 _VALID_URL = r'https?://openload\.(?:co|io)/(?:f|embed)/(?P<id>[a-zA-Z0-9-_]+)'
17
18 _TESTS = [{
19 'url': 'https://openload.co/f/kUEfGclsU9o',
20 'md5': 'bf1c059b004ebc7a256f89408e65c36e',
21 'info_dict': {
22 'id': 'kUEfGclsU9o',
23 'ext': 'mp4',
24 'title': 'skyrim_no-audio_1080.mp4',
25 'thumbnail': 're:^https?://.*\.jpg$',
26 },
27 }, {
28 'url': 'https://openload.co/embed/rjC09fkPLYs',
29 'info_dict': {
30 'id': 'rjC09fkPLYs',
31 'ext': 'mp4',
32 'title': 'movie.mp4',
33 'thumbnail': 're:^https?://.*\.jpg$',
34 'subtitles': {
35 'en': [{
36 'ext': 'vtt',
37 }],
38 },
39 },
40 'params': {
41 'skip_download': True, # test subtitles only
42 },
43 }, {
44 'url': 'https://openload.co/embed/kUEfGclsU9o/skyrim_no-audio_1080.mp4',
45 'only_matching': True,
46 }, {
47 'url': 'https://openload.io/f/ZAn6oz-VZGE/',
48 'only_matching': True,
49 }, {
50 'url': 'https://openload.co/f/_-ztPaZtMhM/',
51 'only_matching': True,
52 }, {
53 # unavailable via https://openload.co/f/Sxz5sADo82g/, different layout
54 # for title and ext
55 'url': 'https://openload.co/embed/Sxz5sADo82g/',
56 'only_matching': True,
57 }]
58
59 def _real_extract(self, url):
60 video_id = self._match_id(url)
61 webpage = self._download_webpage('https://openload.co/embed/%s/' % video_id, video_id)
62
63 if 'File not found' in webpage or 'deleted by the owner' in webpage:
64 raise ExtractorError('File not found', expected=True)
65
66 # The following decryption algorithm is written by @yokrysty and
67 # declared to be freely used in youtube-dl
68 # See https://github.com/rg3/youtube-dl/issues/10408
69 enc_data = self._html_search_regex(
70 r'<span[^>]*>([^<]+)</span>\s*<span[^>]*>[^<]+</span>\s*<span[^>]+id="streamurl"',
71 webpage, 'encrypted data')
72
73 video_url_chars = []
74
75 for idx, c in enumerate(enc_data):
76 j = compat_ord(c)
77 if j >= 33 and j <= 126:
78 j = ((j + 14) % 94) + 33
79 if idx == len(enc_data) - 1:
80 j += 2
81 video_url_chars += compat_chr(j)
82
83 video_url = 'https://openload.co/stream/%s?mime=true' % ''.join(video_url_chars)
84
85 title = self._og_search_title(webpage, default=None) or self._search_regex(
86 r'<span[^>]+class=["\']title["\'][^>]*>([^<]+)', webpage,
87 'title', default=None) or self._html_search_meta(
88 'description', webpage, 'title', fatal=True)
89
90 entries = self._parse_html5_media_entries(url, webpage, video_id)
91 subtitles = entries[0]['subtitles'] if entries else None
92
93 info_dict = {
94 'id': video_id,
95 'title': title,
96 'thumbnail': self._og_search_thumbnail(webpage, default=None),
97 'url': video_url,
98 # Seems all videos have extensions in their titles
99 'ext': determine_ext(title),
100 'subtitles': subtitles,
101 }
102
103 return info_dict
104
[end of youtube_dl/extractor/openload.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/youtube_dl/extractor/openload.py b/youtube_dl/extractor/openload.py
--- a/youtube_dl/extractor/openload.py
+++ b/youtube_dl/extractor/openload.py
@@ -70,10 +70,15 @@
r'<span[^>]*>([^<]+)</span>\s*<span[^>]*>[^<]+</span>\s*<span[^>]+id="streamurl"',
webpage, 'encrypted data')
+ magic = compat_ord(enc_data[-1])
video_url_chars = []
for idx, c in enumerate(enc_data):
j = compat_ord(c)
+ if j == magic:
+ j -= 1
+ elif j == magic - 1:
+ j += 1
if j >= 33 and j <= 126:
j = ((j + 14) % 94) + 33
if idx == len(enc_data) - 1:
| {"golden_diff": "diff --git a/youtube_dl/extractor/openload.py b/youtube_dl/extractor/openload.py\n--- a/youtube_dl/extractor/openload.py\n+++ b/youtube_dl/extractor/openload.py\n@@ -70,10 +70,15 @@\n r'<span[^>]*>([^<]+)</span>\\s*<span[^>]*>[^<]+</span>\\s*<span[^>]+id=\"streamurl\"',\n webpage, 'encrypted data')\n \n+ magic = compat_ord(enc_data[-1])\n video_url_chars = []\n \n for idx, c in enumerate(enc_data):\n j = compat_ord(c)\n+ if j == magic:\n+ j -= 1\n+ elif j == magic - 1:\n+ j += 1\n if j >= 33 and j <= 126:\n j = ((j + 14) % 94) + 33\n if idx == len(enc_data) - 1:\n", "issue": "openload.co extractor not working\n youtube-dl --get-url --verbose https://openload.co/embed/kUEfGclsU9o/\n[debug] System config: []\n[debug] User config: []\n[debug] Command-line args: [u'--get-url', u'--verbose', u'https://openload.co/embed/kUEfGclsU9o/']\n[debug] Encodings: locale UTF-8, fs UTF-8, out UTF-8, pref UTF-8\n[debug] youtube-dl version 2016.08.22\n[debug] Python version 2.6.6 - Linux-2.6.32-642.1.1.el6.x86_64-x86_64-with-centos-6.8-Final\n[debug] exe versions: ffmpeg 0.6.5, ffprobe 0.6.5\n[debug] Proxy map: {}\nERROR: Unable to extract link image; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.\nTraceback (most recent call last):\n File \"/usr/local/bin/youtube-dl/youtube_dl/YoutubeDL.py\", line 691, in extract_info\n ie_result = ie.extract(url)\n File \"/usr/local/bin/youtube-dl/youtube_dl/extractor/common.py\", line 347, in extract\n return self._real_extract(url)\n File \"/usr/local/bin/youtube-dl/youtube_dl/extractor/openload.py\", line 62, in _real_extract\n r'<img[^>]+id=\"linkimg\"[^>]+src=\"([^\"]+)\"', webpage, 'link image')\n File \"/usr/local/bin/youtube-dl/youtube_dl/extractor/common.py\", line 650, in _search_regex\n raise RegexNotFoundError('Unable to extract %s' % _name)\nRegexNotFoundError: Unable to extract link image; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. 
Be sure to call youtube-dl with the --verbose flag and include its complete output.\n\n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals, division\n\nfrom .common import InfoExtractor\nfrom ..compat import (\n compat_chr,\n compat_ord,\n)\nfrom ..utils import (\n determine_ext,\n ExtractorError,\n)\n\n\nclass OpenloadIE(InfoExtractor):\n _VALID_URL = r'https?://openload\\.(?:co|io)/(?:f|embed)/(?P<id>[a-zA-Z0-9-_]+)'\n\n _TESTS = [{\n 'url': 'https://openload.co/f/kUEfGclsU9o',\n 'md5': 'bf1c059b004ebc7a256f89408e65c36e',\n 'info_dict': {\n 'id': 'kUEfGclsU9o',\n 'ext': 'mp4',\n 'title': 'skyrim_no-audio_1080.mp4',\n 'thumbnail': 're:^https?://.*\\.jpg$',\n },\n }, {\n 'url': 'https://openload.co/embed/rjC09fkPLYs',\n 'info_dict': {\n 'id': 'rjC09fkPLYs',\n 'ext': 'mp4',\n 'title': 'movie.mp4',\n 'thumbnail': 're:^https?://.*\\.jpg$',\n 'subtitles': {\n 'en': [{\n 'ext': 'vtt',\n }],\n },\n },\n 'params': {\n 'skip_download': True, # test subtitles only\n },\n }, {\n 'url': 'https://openload.co/embed/kUEfGclsU9o/skyrim_no-audio_1080.mp4',\n 'only_matching': True,\n }, {\n 'url': 'https://openload.io/f/ZAn6oz-VZGE/',\n 'only_matching': True,\n }, {\n 'url': 'https://openload.co/f/_-ztPaZtMhM/',\n 'only_matching': True,\n }, {\n # unavailable via https://openload.co/f/Sxz5sADo82g/, different layout\n # for title and ext\n 'url': 'https://openload.co/embed/Sxz5sADo82g/',\n 'only_matching': True,\n }]\n\n def _real_extract(self, url):\n video_id = self._match_id(url)\n webpage = self._download_webpage('https://openload.co/embed/%s/' % video_id, video_id)\n\n if 'File not found' in webpage or 'deleted by the owner' in webpage:\n raise ExtractorError('File not found', expected=True)\n\n # The following decryption algorithm is written by @yokrysty and\n # declared to be freely used in youtube-dl\n # See https://github.com/rg3/youtube-dl/issues/10408\n enc_data = self._html_search_regex(\n r'<span[^>]*>([^<]+)</span>\\s*<span[^>]*>[^<]+</span>\\s*<span[^>]+id=\"streamurl\"',\n webpage, 'encrypted data')\n\n video_url_chars = []\n\n for idx, c in enumerate(enc_data):\n j = compat_ord(c)\n if j >= 33 and j <= 126:\n j = ((j + 14) % 94) + 33\n if idx == len(enc_data) - 1:\n j += 2\n video_url_chars += compat_chr(j)\n\n video_url = 'https://openload.co/stream/%s?mime=true' % ''.join(video_url_chars)\n\n title = self._og_search_title(webpage, default=None) or self._search_regex(\n r'<span[^>]+class=[\"\\']title[\"\\'][^>]*>([^<]+)', webpage,\n 'title', default=None) or self._html_search_meta(\n 'description', webpage, 'title', fatal=True)\n\n entries = self._parse_html5_media_entries(url, webpage, video_id)\n subtitles = entries[0]['subtitles'] if entries else None\n\n info_dict = {\n 'id': video_id,\n 'title': title,\n 'thumbnail': self._og_search_thumbnail(webpage, default=None),\n 'url': video_url,\n # Seems all videos have extensions in their titles\n 'ext': determine_ext(title),\n 'subtitles': subtitles,\n }\n\n return info_dict\n", "path": "youtube_dl/extractor/openload.py"}]} | 2,250 | 223 |
gh_patches_debug_31725 | rasdani/github-patches | git_diff | pyca__cryptography-3880 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
RFC 5649 support
RFC 3394 (AES Key Wrap) was added a while back. I'd like to request support for RFC 5649 (AES Key Wrap with Padding), since it builds off of RFC 3394. It looks like OpenSSL handled this back in 2015:
https://rt.openssl.org/Ticket/Display.html?id=3675&user=guest&pass=guest
Is this feasible for cryptography in the not-too-distant future?
Thanks,
Peter
</issue>
<code>
[start of src/cryptography/hazmat/primitives/keywrap.py]
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 import struct
8
9 from cryptography.hazmat.primitives.ciphers import Cipher
10 from cryptography.hazmat.primitives.ciphers.algorithms import AES
11 from cryptography.hazmat.primitives.ciphers.modes import ECB
12 from cryptography.hazmat.primitives.constant_time import bytes_eq
13
14
15 def _wrap_core(wrapping_key, a, r, backend):
16 # RFC 3394 Key Wrap - 2.2.1 (index method)
17 encryptor = Cipher(AES(wrapping_key), ECB(), backend).encryptor()
18 n = len(r)
19 for j in range(6):
20 for i in range(n):
21 # every encryption operation is a discrete 16 byte chunk (because
22 # AES has a 128-bit block size) and since we're using ECB it is
23 # safe to reuse the encryptor for the entire operation
24 b = encryptor.update(a + r[i])
25 # pack/unpack are safe as these are always 64-bit chunks
26 a = struct.pack(
27 ">Q", struct.unpack(">Q", b[:8])[0] ^ ((n * j) + i + 1)
28 )
29 r[i] = b[-8:]
30
31 assert encryptor.finalize() == b""
32
33 return a + b"".join(r)
34
35
36 def aes_key_wrap(wrapping_key, key_to_wrap, backend):
37 if len(wrapping_key) not in [16, 24, 32]:
38 raise ValueError("The wrapping key must be a valid AES key length")
39
40 if len(key_to_wrap) < 16:
41 raise ValueError("The key to wrap must be at least 16 bytes")
42
43 if len(key_to_wrap) % 8 != 0:
44 raise ValueError("The key to wrap must be a multiple of 8 bytes")
45
46 a = b"\xa6\xa6\xa6\xa6\xa6\xa6\xa6\xa6"
47 r = [key_to_wrap[i:i + 8] for i in range(0, len(key_to_wrap), 8)]
48 return _wrap_core(wrapping_key, a, r, backend)
49
50
51 def _unwrap_core(wrapping_key, a, r, backend):
52 # Implement RFC 3394 Key Unwrap - 2.2.2 (index method)
53 decryptor = Cipher(AES(wrapping_key), ECB(), backend).decryptor()
54 n = len(r)
55 for j in reversed(range(6)):
56 for i in reversed(range(n)):
57 # pack/unpack are safe as these are always 64-bit chunks
58 atr = struct.pack(
59 ">Q", struct.unpack(">Q", a)[0] ^ ((n * j) + i + 1)
60 ) + r[i]
61 # every decryption operation is a discrete 16 byte chunk so
62 # it is safe to reuse the decryptor for the entire operation
63 b = decryptor.update(atr)
64 a = b[:8]
65 r[i] = b[-8:]
66
67 assert decryptor.finalize() == b""
68 return a, r
69
70
71 def aes_key_unwrap(wrapping_key, wrapped_key, backend):
72 if len(wrapped_key) < 24:
73 raise ValueError("Must be at least 24 bytes")
74
75 if len(wrapped_key) % 8 != 0:
76 raise ValueError("The wrapped key must be a multiple of 8 bytes")
77
78 if len(wrapping_key) not in [16, 24, 32]:
79 raise ValueError("The wrapping key must be a valid AES key length")
80
81 aiv = b"\xa6\xa6\xa6\xa6\xa6\xa6\xa6\xa6"
82 r = [wrapped_key[i:i + 8] for i in range(0, len(wrapped_key), 8)]
83 a = r.pop(0)
84 a, r = _unwrap_core(wrapping_key, a, r, backend)
85 if not bytes_eq(a, aiv):
86 raise InvalidUnwrap()
87
88 return b"".join(r)
89
90
91 class InvalidUnwrap(Exception):
92 pass
93
[end of src/cryptography/hazmat/primitives/keywrap.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cryptography/hazmat/primitives/keywrap.py b/src/cryptography/hazmat/primitives/keywrap.py
--- a/src/cryptography/hazmat/primitives/keywrap.py
+++ b/src/cryptography/hazmat/primitives/keywrap.py
@@ -68,6 +68,63 @@
return a, r
+def aes_key_wrap_with_padding(wrapping_key, key_to_wrap, backend):
+ if len(wrapping_key) not in [16, 24, 32]:
+ raise ValueError("The wrapping key must be a valid AES key length")
+
+ aiv = b"\xA6\x59\x59\xA6" + struct.pack(">i", len(key_to_wrap))
+ # pad the key to wrap if necessary
+ pad = (8 - (len(key_to_wrap) % 8)) % 8
+ key_to_wrap = key_to_wrap + b"\x00" * pad
+ if len(key_to_wrap) == 8:
+ # RFC 5649 - 4.1 - exactly 8 octets after padding
+ encryptor = Cipher(AES(wrapping_key), ECB(), backend).encryptor()
+ b = encryptor.update(aiv + key_to_wrap)
+ assert encryptor.finalize() == b""
+ return b
+ else:
+ r = [key_to_wrap[i:i + 8] for i in range(0, len(key_to_wrap), 8)]
+ return _wrap_core(wrapping_key, aiv, r, backend)
+
+
+def aes_key_unwrap_with_padding(wrapping_key, wrapped_key, backend):
+ if len(wrapped_key) < 16:
+ raise ValueError("Must be at least 16 bytes")
+
+ if len(wrapping_key) not in [16, 24, 32]:
+ raise ValueError("The wrapping key must be a valid AES key length")
+
+ if len(wrapped_key) == 16:
+ # RFC 5649 - 4.2 - exactly two 64-bit blocks
+ decryptor = Cipher(AES(wrapping_key), ECB(), backend).decryptor()
+ b = decryptor.update(wrapped_key)
+ assert decryptor.finalize() == b""
+ a = b[:8]
+ data = b[8:]
+ n = 1
+ else:
+ r = [wrapped_key[i:i + 8] for i in range(0, len(wrapped_key), 8)]
+ encrypted_aiv = r.pop(0)
+ n = len(r)
+ a, r = _unwrap_core(wrapping_key, encrypted_aiv, r, backend)
+ data = b"".join(r)
+
+ # 1) Check that MSB(32,A) = A65959A6.
+ # 2) Check that 8*(n-1) < LSB(32,A) <= 8*n. If so, let
+ # MLI = LSB(32,A).
+ # 3) Let b = (8*n)-MLI, and then check that the rightmost b octets of
+ # the output data are zero.
+ (mli,) = struct.unpack(">I", a[4:])
+ b = (8 * n) - mli
+ if (
+ not bytes_eq(a[:4], b"\xa6\x59\x59\xa6") or not
+ 8 * (n - 1) < mli <= 8 * n or not bytes_eq(data[-b:], b"\x00" * b)
+ ):
+ raise InvalidUnwrap()
+
+ return data[:-b]
+
+
def aes_key_unwrap(wrapping_key, wrapped_key, backend):
if len(wrapped_key) < 24:
raise ValueError("Must be at least 24 bytes")
| {"golden_diff": "diff --git a/src/cryptography/hazmat/primitives/keywrap.py b/src/cryptography/hazmat/primitives/keywrap.py\n--- a/src/cryptography/hazmat/primitives/keywrap.py\n+++ b/src/cryptography/hazmat/primitives/keywrap.py\n@@ -68,6 +68,63 @@\n return a, r\n \n \n+def aes_key_wrap_with_padding(wrapping_key, key_to_wrap, backend):\n+ if len(wrapping_key) not in [16, 24, 32]:\n+ raise ValueError(\"The wrapping key must be a valid AES key length\")\n+\n+ aiv = b\"\\xA6\\x59\\x59\\xA6\" + struct.pack(\">i\", len(key_to_wrap))\n+ # pad the key to wrap if necessary\n+ pad = (8 - (len(key_to_wrap) % 8)) % 8\n+ key_to_wrap = key_to_wrap + b\"\\x00\" * pad\n+ if len(key_to_wrap) == 8:\n+ # RFC 5649 - 4.1 - exactly 8 octets after padding\n+ encryptor = Cipher(AES(wrapping_key), ECB(), backend).encryptor()\n+ b = encryptor.update(aiv + key_to_wrap)\n+ assert encryptor.finalize() == b\"\"\n+ return b\n+ else:\n+ r = [key_to_wrap[i:i + 8] for i in range(0, len(key_to_wrap), 8)]\n+ return _wrap_core(wrapping_key, aiv, r, backend)\n+\n+\n+def aes_key_unwrap_with_padding(wrapping_key, wrapped_key, backend):\n+ if len(wrapped_key) < 16:\n+ raise ValueError(\"Must be at least 16 bytes\")\n+\n+ if len(wrapping_key) not in [16, 24, 32]:\n+ raise ValueError(\"The wrapping key must be a valid AES key length\")\n+\n+ if len(wrapped_key) == 16:\n+ # RFC 5649 - 4.2 - exactly two 64-bit blocks\n+ decryptor = Cipher(AES(wrapping_key), ECB(), backend).decryptor()\n+ b = decryptor.update(wrapped_key)\n+ assert decryptor.finalize() == b\"\"\n+ a = b[:8]\n+ data = b[8:]\n+ n = 1\n+ else:\n+ r = [wrapped_key[i:i + 8] for i in range(0, len(wrapped_key), 8)]\n+ encrypted_aiv = r.pop(0)\n+ n = len(r)\n+ a, r = _unwrap_core(wrapping_key, encrypted_aiv, r, backend)\n+ data = b\"\".join(r)\n+\n+ # 1) Check that MSB(32,A) = A65959A6.\n+ # 2) Check that 8*(n-1) < LSB(32,A) <= 8*n. If so, let\n+ # MLI = LSB(32,A).\n+ # 3) Let b = (8*n)-MLI, and then check that the rightmost b octets of\n+ # the output data are zero.\n+ (mli,) = struct.unpack(\">I\", a[4:])\n+ b = (8 * n) - mli\n+ if (\n+ not bytes_eq(a[:4], b\"\\xa6\\x59\\x59\\xa6\") or not\n+ 8 * (n - 1) < mli <= 8 * n or not bytes_eq(data[-b:], b\"\\x00\" * b)\n+ ):\n+ raise InvalidUnwrap()\n+\n+ return data[:-b]\n+\n+\n def aes_key_unwrap(wrapping_key, wrapped_key, backend):\n if len(wrapped_key) < 24:\n raise ValueError(\"Must be at least 24 bytes\")\n", "issue": "RFC 5649 support\nRFC 3394 (AES Key Wrap) was added a while back. I'd like to request support for RFC 5649 (AES Key Wrap with Padding), since it builds off of RFC 3394. It looks like OpenSSL handled this back in 2015:\r\n\r\nhttps://rt.openssl.org/Ticket/Display.html?id=3675&user=guest&pass=guest\r\n\r\nIs this feasible for cryptography in the not-too-distant future?\r\n\r\nThanks,\r\nPeter\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport struct\n\nfrom cryptography.hazmat.primitives.ciphers import Cipher\nfrom cryptography.hazmat.primitives.ciphers.algorithms import AES\nfrom cryptography.hazmat.primitives.ciphers.modes import ECB\nfrom cryptography.hazmat.primitives.constant_time import bytes_eq\n\n\ndef _wrap_core(wrapping_key, a, r, backend):\n # RFC 3394 Key Wrap - 2.2.1 (index method)\n encryptor = Cipher(AES(wrapping_key), ECB(), backend).encryptor()\n n = len(r)\n for j in range(6):\n for i in range(n):\n # every encryption operation is a discrete 16 byte chunk (because\n # AES has a 128-bit block size) and since we're using ECB it is\n # safe to reuse the encryptor for the entire operation\n b = encryptor.update(a + r[i])\n # pack/unpack are safe as these are always 64-bit chunks\n a = struct.pack(\n \">Q\", struct.unpack(\">Q\", b[:8])[0] ^ ((n * j) + i + 1)\n )\n r[i] = b[-8:]\n\n assert encryptor.finalize() == b\"\"\n\n return a + b\"\".join(r)\n\n\ndef aes_key_wrap(wrapping_key, key_to_wrap, backend):\n if len(wrapping_key) not in [16, 24, 32]:\n raise ValueError(\"The wrapping key must be a valid AES key length\")\n\n if len(key_to_wrap) < 16:\n raise ValueError(\"The key to wrap must be at least 16 bytes\")\n\n if len(key_to_wrap) % 8 != 0:\n raise ValueError(\"The key to wrap must be a multiple of 8 bytes\")\n\n a = b\"\\xa6\\xa6\\xa6\\xa6\\xa6\\xa6\\xa6\\xa6\"\n r = [key_to_wrap[i:i + 8] for i in range(0, len(key_to_wrap), 8)]\n return _wrap_core(wrapping_key, a, r, backend)\n\n\ndef _unwrap_core(wrapping_key, a, r, backend):\n # Implement RFC 3394 Key Unwrap - 2.2.2 (index method)\n decryptor = Cipher(AES(wrapping_key), ECB(), backend).decryptor()\n n = len(r)\n for j in reversed(range(6)):\n for i in reversed(range(n)):\n # pack/unpack are safe as these are always 64-bit chunks\n atr = struct.pack(\n \">Q\", struct.unpack(\">Q\", a)[0] ^ ((n * j) + i + 1)\n ) + r[i]\n # every decryption operation is a discrete 16 byte chunk so\n # it is safe to reuse the decryptor for the entire operation\n b = decryptor.update(atr)\n a = b[:8]\n r[i] = b[-8:]\n\n assert decryptor.finalize() == b\"\"\n return a, r\n\n\ndef aes_key_unwrap(wrapping_key, wrapped_key, backend):\n if len(wrapped_key) < 24:\n raise ValueError(\"Must be at least 24 bytes\")\n\n if len(wrapped_key) % 8 != 0:\n raise ValueError(\"The wrapped key must be a multiple of 8 bytes\")\n\n if len(wrapping_key) not in [16, 24, 32]:\n raise ValueError(\"The wrapping key must be a valid AES key length\")\n\n aiv = b\"\\xa6\\xa6\\xa6\\xa6\\xa6\\xa6\\xa6\\xa6\"\n r = [wrapped_key[i:i + 8] for i in range(0, len(wrapped_key), 8)]\n a = r.pop(0)\n a, r = _unwrap_core(wrapping_key, a, r, backend)\n if not bytes_eq(a, aiv):\n raise InvalidUnwrap()\n\n return b\"\".join(r)\n\n\nclass InvalidUnwrap(Exception):\n pass\n", "path": "src/cryptography/hazmat/primitives/keywrap.py"}]} | 1,791 | 885 |
gh_patches_debug_31639 | rasdani/github-patches | git_diff | ESMCI__cime-1136 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ERR test does not always report failures correctly
The ERR test runs four separate jobs; if one of these jobs completes but the next fails to launch, the test reports PASS. To reproduce this problem it's enough to edit the jobid_pattern field in config batch so that the dependency is incorrect - this causes the first job to exit and the second to fail to launch. But the TestStatus file indicates all PASS.
</issue>
<code>
[start of utils/python/CIME/case_submit.py]
1 #!/usr/bin/env python
2
3 """
4 case.submit - Submit a cesm workflow to the queueing system or run it
5 if there is no queueing system. A cesm workflow may include multiple
6 jobs.
7 """
8 import socket
9 from CIME.XML.standard_module_setup import *
10 from CIME.utils import expect, append_status
11 from CIME.preview_namelists import create_namelists
12 from CIME.check_lockedfiles import check_lockedfiles
13 from CIME.check_input_data import check_all_input_data
14 from CIME.case_cmpgen_namelists import case_cmpgen_namelists
15
16 logger = logging.getLogger(__name__)
17
18 def submit(case, job=None, resubmit=False, no_batch=False):
19 caseroot = case.get_value("CASEROOT")
20
21 if job is None:
22 if case.get_value("TEST"):
23 job = "case.test"
24 else:
25 job = "case.run"
26
27 if resubmit:
28 resub = case.get_value("RESUBMIT")
29 logger.info("Submitting job '%s', resubmit=%d" % (job, resub))
30 case.set_value("RESUBMIT",resub-1)
31 if case.get_value("RESUBMIT_SETS_CONTINUE_RUN"):
32 case.set_value("CONTINUE_RUN", True)
33 else:
34 if job in ("case.test","case.run"):
35 check_case(case, caseroot)
36 check_DA_settings(case)
37 if case.get_value("MACH") == "mira":
38 with open(".original_host","w") as fd:
39 fd.write( socket.gethostname())
40
41 # if case.submit is called with the no_batch flag then we assume that this
42 # flag will stay in effect for the duration of the RESUBMITs
43 env_batch = case.get_env("batch")
44 if not resubmit:
45 case.set_value("IS_FIRST_RUN", True)
46 if no_batch:
47 batch_system = "none"
48 else:
49 batch_system = env_batch.get_batch_system_type()
50 case.set_value("BATCH_SYSTEM", batch_system)
51 else:
52 if env_batch.get_batch_system_type() == "none":
53 no_batch = True
54
55 # This is a resubmission, do not reinitialize test values
56 case.set_value("IS_FIRST_RUN", False)
57
58 #Load Modules
59 case.load_env()
60
61 case.set_value("RUN_WITH_SUBMIT",True)
62 case.flush()
63
64 logger.warn("submit_jobs %s"%job)
65 job_ids = case.submit_jobs(no_batch=no_batch, job=job)
66 msg = "Submitted jobs %s"%job_ids
67 append_status(msg, caseroot=caseroot, sfile="CaseStatus")
68
69 def check_case(case, caseroot):
70 check_lockedfiles(caseroot)
71 create_namelists(case) # Must be called before check_all_input_data
72 logger.info("Checking that inputdata is available as part of case submission")
73 check_all_input_data(case)
74 # Now that we have baselines, do baseline operations
75 if case.get_value("TEST"):
76 case_cmpgen_namelists(case)
77
78 expect(case.get_value("BUILD_COMPLETE"), "Build complete is "
79 "not True please rebuild the model by calling case.build")
80 logger.info("Check case OK")
81
82 def check_DA_settings(case):
83 if case.get_value("DATA_ASSIMILATION"):
84 script = case.get_value("DATA_ASSIMILATION_SCRIPT")
85 cycles = case.get_value("DATA_ASSIMILATION_CYCLES")
86 logger.info("Data Assimilation enabled using script %s with %d cycles"%(script,cycles))
87
88
[end of utils/python/CIME/case_submit.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/utils/python/CIME/case_submit.py b/utils/python/CIME/case_submit.py
--- a/utils/python/CIME/case_submit.py
+++ b/utils/python/CIME/case_submit.py
@@ -12,10 +12,11 @@
from CIME.check_lockedfiles import check_lockedfiles
from CIME.check_input_data import check_all_input_data
from CIME.case_cmpgen_namelists import case_cmpgen_namelists
+from CIME.test_status import *
logger = logging.getLogger(__name__)
-def submit(case, job=None, resubmit=False, no_batch=False):
+def _submit(case, job=None, resubmit=False, no_batch=False):
caseroot = case.get_value("CASEROOT")
if job is None:
@@ -61,11 +62,27 @@
case.set_value("RUN_WITH_SUBMIT",True)
case.flush()
- logger.warn("submit_jobs %s"%job)
+ logger.warn("submit_jobs %s" % job)
job_ids = case.submit_jobs(no_batch=no_batch, job=job)
- msg = "Submitted jobs %s"%job_ids
+ msg = "Submitted jobs %s" % job_ids
append_status(msg, caseroot=caseroot, sfile="CaseStatus")
+def submit(case, job=None, resubmit=False, no_batch=False):
+ try:
+ _submit(case, job=job, resubmit=resubmit, no_batch=no_batch)
+ except:
+ # If something failed in the batch system, make sure to mark
+ # the test as failed if we are running a test.
+ if case.get_value("TEST"):
+ caseroot = case.get_value("CASEROOT")
+ casebaseid = case.get_value("CASEBASEID")
+ with TestStatus(test_dir=caseroot, test_name=casebaseid, lock=True) as ts:
+ ts.set_status(RUN_PHASE, TEST_FAIL_STATUS, comments="batch system failure")
+
+ append_status("Batch submission failed, TestStatus file changed to read-only", caseroot=caseroot, sfile="TestStatus.log")
+
+ raise
+
def check_case(case, caseroot):
check_lockedfiles(caseroot)
create_namelists(case) # Must be called before check_all_input_data
| {"golden_diff": "diff --git a/utils/python/CIME/case_submit.py b/utils/python/CIME/case_submit.py\n--- a/utils/python/CIME/case_submit.py\n+++ b/utils/python/CIME/case_submit.py\n@@ -12,10 +12,11 @@\n from CIME.check_lockedfiles import check_lockedfiles\n from CIME.check_input_data import check_all_input_data\n from CIME.case_cmpgen_namelists import case_cmpgen_namelists\n+from CIME.test_status import *\n \n logger = logging.getLogger(__name__)\n \n-def submit(case, job=None, resubmit=False, no_batch=False):\n+def _submit(case, job=None, resubmit=False, no_batch=False):\n caseroot = case.get_value(\"CASEROOT\")\n \n if job is None:\n@@ -61,11 +62,27 @@\n case.set_value(\"RUN_WITH_SUBMIT\",True)\n case.flush()\n \n- logger.warn(\"submit_jobs %s\"%job)\n+ logger.warn(\"submit_jobs %s\" % job)\n job_ids = case.submit_jobs(no_batch=no_batch, job=job)\n- msg = \"Submitted jobs %s\"%job_ids\n+ msg = \"Submitted jobs %s\" % job_ids\n append_status(msg, caseroot=caseroot, sfile=\"CaseStatus\")\n \n+def submit(case, job=None, resubmit=False, no_batch=False):\n+ try:\n+ _submit(case, job=job, resubmit=resubmit, no_batch=no_batch)\n+ except:\n+ # If something failed in the batch system, make sure to mark\n+ # the test as failed if we are running a test.\n+ if case.get_value(\"TEST\"):\n+ caseroot = case.get_value(\"CASEROOT\")\n+ casebaseid = case.get_value(\"CASEBASEID\")\n+ with TestStatus(test_dir=caseroot, test_name=casebaseid, lock=True) as ts:\n+ ts.set_status(RUN_PHASE, TEST_FAIL_STATUS, comments=\"batch system failure\")\n+\n+ append_status(\"Batch submission failed, TestStatus file changed to read-only\", caseroot=caseroot, sfile=\"TestStatus.log\")\n+\n+ raise\n+\n def check_case(case, caseroot):\n check_lockedfiles(caseroot)\n create_namelists(case) # Must be called before check_all_input_data\n", "issue": "ERR test does not always report failures correctly\nThe ERR test runs four separate jobs, if one of these jobs completes but the next fails to launch, the test reports PASS. To reproduce this problem its enough to edit the jobid_pattern field in config batch so that the dependency is incorrect - this causes the first job to exit and the second to fail to launch. But the TestStatus file indicates all PASS. \n", "before_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"\ncase.submit - Submit a cesm workflow to the queueing system or run it\nif there is no queueing system. 
A cesm workflow may include multiple\njobs.\n\"\"\"\nimport socket\nfrom CIME.XML.standard_module_setup import *\nfrom CIME.utils import expect, append_status\nfrom CIME.preview_namelists import create_namelists\nfrom CIME.check_lockedfiles import check_lockedfiles\nfrom CIME.check_input_data import check_all_input_data\nfrom CIME.case_cmpgen_namelists import case_cmpgen_namelists\n\nlogger = logging.getLogger(__name__)\n\ndef submit(case, job=None, resubmit=False, no_batch=False):\n caseroot = case.get_value(\"CASEROOT\")\n\n if job is None:\n if case.get_value(\"TEST\"):\n job = \"case.test\"\n else:\n job = \"case.run\"\n\n if resubmit:\n resub = case.get_value(\"RESUBMIT\")\n logger.info(\"Submitting job '%s', resubmit=%d\" % (job, resub))\n case.set_value(\"RESUBMIT\",resub-1)\n if case.get_value(\"RESUBMIT_SETS_CONTINUE_RUN\"):\n case.set_value(\"CONTINUE_RUN\", True)\n else:\n if job in (\"case.test\",\"case.run\"):\n check_case(case, caseroot)\n check_DA_settings(case)\n if case.get_value(\"MACH\") == \"mira\":\n with open(\".original_host\",\"w\") as fd:\n fd.write( socket.gethostname())\n\n # if case.submit is called with the no_batch flag then we assume that this\n # flag will stay in effect for the duration of the RESUBMITs\n env_batch = case.get_env(\"batch\")\n if not resubmit:\n case.set_value(\"IS_FIRST_RUN\", True)\n if no_batch:\n batch_system = \"none\"\n else:\n batch_system = env_batch.get_batch_system_type()\n case.set_value(\"BATCH_SYSTEM\", batch_system)\n else:\n if env_batch.get_batch_system_type() == \"none\":\n no_batch = True\n\n # This is a resubmission, do not reinitialize test values\n case.set_value(\"IS_FIRST_RUN\", False)\n\n #Load Modules\n case.load_env()\n\n case.set_value(\"RUN_WITH_SUBMIT\",True)\n case.flush()\n\n logger.warn(\"submit_jobs %s\"%job)\n job_ids = case.submit_jobs(no_batch=no_batch, job=job)\n msg = \"Submitted jobs %s\"%job_ids\n append_status(msg, caseroot=caseroot, sfile=\"CaseStatus\")\n\ndef check_case(case, caseroot):\n check_lockedfiles(caseroot)\n create_namelists(case) # Must be called before check_all_input_data\n logger.info(\"Checking that inputdata is available as part of case submission\")\n check_all_input_data(case)\n # Now that we have baselines, do baseline operations\n if case.get_value(\"TEST\"):\n case_cmpgen_namelists(case)\n\n expect(case.get_value(\"BUILD_COMPLETE\"), \"Build complete is \"\n \"not True please rebuild the model by calling case.build\")\n logger.info(\"Check case OK\")\n\ndef check_DA_settings(case):\n if case.get_value(\"DATA_ASSIMILATION\"):\n script = case.get_value(\"DATA_ASSIMILATION_SCRIPT\")\n cycles = case.get_value(\"DATA_ASSIMILATION_CYCLES\")\n logger.info(\"Data Assimilation enabled using script %s with %d cycles\"%(script,cycles))\n\n", "path": "utils/python/CIME/case_submit.py"}]} | 1,566 | 520 |
gh_patches_debug_13385 | rasdani/github-patches | git_diff | lnbits__lnbits-2074 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
LNDRestWallet: catch http errors
This PR catches the errors in `lndrest.py` that can happen when for example the LND certificate has expired.
This can lead to stuck payments.
</issue>
<code>
[start of lnbits/wallets/lndrest.py]
1 import asyncio
2 import base64
3 import hashlib
4 import json
5 from typing import AsyncGenerator, Dict, Optional
6
7 import httpx
8 from loguru import logger
9
10 from lnbits.nodes.lndrest import LndRestNode
11 from lnbits.settings import settings
12
13 from .base import (
14 InvoiceResponse,
15 PaymentResponse,
16 PaymentStatus,
17 StatusResponse,
18 Wallet,
19 )
20 from .macaroon import AESCipher, load_macaroon
21
22
23 class LndRestWallet(Wallet):
24 """https://api.lightning.community/rest/index.html#lnd-rest-api-reference"""
25
26 __node_cls__ = LndRestNode
27
28 def __init__(self):
29 endpoint = settings.lnd_rest_endpoint
30 cert = settings.lnd_rest_cert
31
32 macaroon = (
33 settings.lnd_rest_macaroon
34 or settings.lnd_admin_macaroon
35 or settings.lnd_rest_admin_macaroon
36 or settings.lnd_invoice_macaroon
37 or settings.lnd_rest_invoice_macaroon
38 )
39
40 encrypted_macaroon = settings.lnd_rest_macaroon_encrypted
41 if encrypted_macaroon:
42 macaroon = AESCipher(description="macaroon decryption").decrypt(
43 encrypted_macaroon
44 )
45
46 if not endpoint:
47 raise Exception("cannot initialize lndrest: no endpoint")
48
49 if not macaroon:
50 raise Exception("cannot initialize lndrest: no macaroon")
51
52 if not cert:
53 logger.warning(
54 "no certificate for lndrest provided, this only works if you have a"
55 " publicly issued certificate"
56 )
57
58 endpoint = endpoint[:-1] if endpoint.endswith("/") else endpoint
59 endpoint = (
60 f"https://{endpoint}" if not endpoint.startswith("http") else endpoint
61 )
62 self.endpoint = endpoint
63 self.macaroon = load_macaroon(macaroon)
64
65 # if no cert provided it should be public so we set verify to True
66 # and it will still check for validity of certificate and fail if its not valid
67 # even on startup
68 self.cert = cert or True
69
70 self.auth = {"Grpc-Metadata-macaroon": self.macaroon}
71 self.client = httpx.AsyncClient(
72 base_url=self.endpoint, headers=self.auth, verify=self.cert
73 )
74
75 async def cleanup(self):
76 try:
77 await self.client.aclose()
78 except RuntimeError as e:
79 logger.warning(f"Error closing wallet connection: {e}")
80
81 async def status(self) -> StatusResponse:
82 try:
83 r = await self.client.get("/v1/balance/channels")
84 r.raise_for_status()
85 except (httpx.ConnectError, httpx.RequestError) as exc:
86 return StatusResponse(f"Unable to connect to {self.endpoint}. {exc}", 0)
87
88 try:
89 data = r.json()
90 if r.is_error:
91 raise Exception
92 except Exception:
93 return StatusResponse(r.text[:200], 0)
94
95 return StatusResponse(None, int(data["balance"]) * 1000)
96
97 async def create_invoice(
98 self,
99 amount: int,
100 memo: Optional[str] = None,
101 description_hash: Optional[bytes] = None,
102 unhashed_description: Optional[bytes] = None,
103 **kwargs,
104 ) -> InvoiceResponse:
105 data: Dict = {"value": amount, "private": True, "memo": memo or ""}
106 if kwargs.get("expiry"):
107 data["expiry"] = kwargs["expiry"]
108 if description_hash:
109 data["description_hash"] = base64.b64encode(description_hash).decode(
110 "ascii"
111 )
112 elif unhashed_description:
113 data["description_hash"] = base64.b64encode(
114 hashlib.sha256(unhashed_description).digest()
115 ).decode("ascii")
116
117 r = await self.client.post(url="/v1/invoices", json=data)
118
119 if r.is_error:
120 error_message = r.text
121 try:
122 error_message = r.json()["error"]
123 except Exception:
124 pass
125 return InvoiceResponse(False, None, None, error_message)
126
127 data = r.json()
128 payment_request = data["payment_request"]
129 payment_hash = base64.b64decode(data["r_hash"]).hex()
130 checking_id = payment_hash
131
132 return InvoiceResponse(True, checking_id, payment_request, None)
133
134 async def pay_invoice(self, bolt11: str, fee_limit_msat: int) -> PaymentResponse:
135 # set the fee limit for the payment
136 lnrpcFeeLimit = dict()
137 lnrpcFeeLimit["fixed_msat"] = f"{fee_limit_msat}"
138
139 r = await self.client.post(
140 url="/v1/channels/transactions",
141 json={"payment_request": bolt11, "fee_limit": lnrpcFeeLimit},
142 timeout=None,
143 )
144
145 if r.is_error or r.json().get("payment_error"):
146 error_message = r.json().get("payment_error") or r.text
147 return PaymentResponse(False, None, None, None, error_message)
148
149 data = r.json()
150 checking_id = base64.b64decode(data["payment_hash"]).hex()
151 fee_msat = int(data["payment_route"]["total_fees_msat"])
152 preimage = base64.b64decode(data["payment_preimage"]).hex()
153 return PaymentResponse(True, checking_id, fee_msat, preimage, None)
154
155 async def get_invoice_status(self, checking_id: str) -> PaymentStatus:
156 r = await self.client.get(url=f"/v1/invoice/{checking_id}")
157
158 if r.is_error or not r.json().get("settled"):
159 # this must also work when checking_id is not a hex recognizable by lnd
160 # it will return an error and no "settled" attribute on the object
161 return PaymentStatus(None)
162
163 return PaymentStatus(True)
164
165 async def get_payment_status(self, checking_id: str) -> PaymentStatus:
166 """
167 This routine checks the payment status using routerpc.TrackPaymentV2.
168 """
169 # convert checking_id from hex to base64 and some LND magic
170 try:
171 checking_id = base64.urlsafe_b64encode(bytes.fromhex(checking_id)).decode(
172 "ascii"
173 )
174 except ValueError:
175 return PaymentStatus(None)
176
177 url = f"/v2/router/track/{checking_id}"
178
179 # check payment.status:
180 # https://api.lightning.community/?python=#paymentpaymentstatus
181 statuses = {
182 "UNKNOWN": None,
183 "IN_FLIGHT": None,
184 "SUCCEEDED": True,
185 "FAILED": False,
186 }
187
188 async with self.client.stream("GET", url, timeout=None) as r:
189 async for json_line in r.aiter_lines():
190 try:
191 line = json.loads(json_line)
192 if line.get("error"):
193 logger.error(
194 line["error"]["message"]
195 if "message" in line["error"]
196 else line["error"]
197 )
198 return PaymentStatus(None)
199 payment = line.get("result")
200 if payment is not None and payment.get("status"):
201 return PaymentStatus(
202 paid=statuses[payment["status"]],
203 fee_msat=payment.get("fee_msat"),
204 preimage=payment.get("payment_preimage"),
205 )
206 else:
207 return PaymentStatus(None)
208 except Exception:
209 continue
210
211 return PaymentStatus(None)
212
213 async def paid_invoices_stream(self) -> AsyncGenerator[str, None]:
214 while True:
215 try:
216 url = "/v1/invoices/subscribe"
217 async with self.client.stream("GET", url, timeout=None) as r:
218 async for line in r.aiter_lines():
219 try:
220 inv = json.loads(line)["result"]
221 if not inv["settled"]:
222 continue
223 except Exception:
224 continue
225
226 payment_hash = base64.b64decode(inv["r_hash"]).hex()
227 yield payment_hash
228 except Exception as exc:
229 logger.error(
230 f"lost connection to lnd invoices stream: '{exc}', retrying in 5"
231 " seconds"
232 )
233 await asyncio.sleep(5)
234
[end of lnbits/wallets/lndrest.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lnbits/wallets/lndrest.py b/lnbits/wallets/lndrest.py
--- a/lnbits/wallets/lndrest.py
+++ b/lnbits/wallets/lndrest.py
@@ -195,6 +195,12 @@
if "message" in line["error"]
else line["error"]
)
+ if (
+ line["error"].get("code") == 5
+ and line["error"].get("message")
+ == "payment isn't initiated"
+ ):
+ return PaymentStatus(False)
return PaymentStatus(None)
payment = line.get("result")
if payment is not None and payment.get("status"):
| {"golden_diff": "diff --git a/lnbits/wallets/lndrest.py b/lnbits/wallets/lndrest.py\n--- a/lnbits/wallets/lndrest.py\n+++ b/lnbits/wallets/lndrest.py\n@@ -195,6 +195,12 @@\n if \"message\" in line[\"error\"]\n else line[\"error\"]\n )\n+ if (\n+ line[\"error\"].get(\"code\") == 5\n+ and line[\"error\"].get(\"message\")\n+ == \"payment isn't initiated\"\n+ ):\n+ return PaymentStatus(False)\n return PaymentStatus(None)\n payment = line.get(\"result\")\n if payment is not None and payment.get(\"status\"):\n", "issue": "LNDRestWallet: catch http errors\nThis PR catches the errors in `lndrest.py` that can happen when for example the LND certificate has expired. \r\n\r\nThis can lead to stuck payments.\n", "before_files": [{"content": "import asyncio\nimport base64\nimport hashlib\nimport json\nfrom typing import AsyncGenerator, Dict, Optional\n\nimport httpx\nfrom loguru import logger\n\nfrom lnbits.nodes.lndrest import LndRestNode\nfrom lnbits.settings import settings\n\nfrom .base import (\n InvoiceResponse,\n PaymentResponse,\n PaymentStatus,\n StatusResponse,\n Wallet,\n)\nfrom .macaroon import AESCipher, load_macaroon\n\n\nclass LndRestWallet(Wallet):\n \"\"\"https://api.lightning.community/rest/index.html#lnd-rest-api-reference\"\"\"\n\n __node_cls__ = LndRestNode\n\n def __init__(self):\n endpoint = settings.lnd_rest_endpoint\n cert = settings.lnd_rest_cert\n\n macaroon = (\n settings.lnd_rest_macaroon\n or settings.lnd_admin_macaroon\n or settings.lnd_rest_admin_macaroon\n or settings.lnd_invoice_macaroon\n or settings.lnd_rest_invoice_macaroon\n )\n\n encrypted_macaroon = settings.lnd_rest_macaroon_encrypted\n if encrypted_macaroon:\n macaroon = AESCipher(description=\"macaroon decryption\").decrypt(\n encrypted_macaroon\n )\n\n if not endpoint:\n raise Exception(\"cannot initialize lndrest: no endpoint\")\n\n if not macaroon:\n raise Exception(\"cannot initialize lndrest: no macaroon\")\n\n if not cert:\n logger.warning(\n \"no certificate for lndrest provided, this only works if you have a\"\n \" publicly issued certificate\"\n )\n\n endpoint = endpoint[:-1] if endpoint.endswith(\"/\") else endpoint\n endpoint = (\n f\"https://{endpoint}\" if not endpoint.startswith(\"http\") else endpoint\n )\n self.endpoint = endpoint\n self.macaroon = load_macaroon(macaroon)\n\n # if no cert provided it should be public so we set verify to True\n # and it will still check for validity of certificate and fail if its not valid\n # even on startup\n self.cert = cert or True\n\n self.auth = {\"Grpc-Metadata-macaroon\": self.macaroon}\n self.client = httpx.AsyncClient(\n base_url=self.endpoint, headers=self.auth, verify=self.cert\n )\n\n async def cleanup(self):\n try:\n await self.client.aclose()\n except RuntimeError as e:\n logger.warning(f\"Error closing wallet connection: {e}\")\n\n async def status(self) -> StatusResponse:\n try:\n r = await self.client.get(\"/v1/balance/channels\")\n r.raise_for_status()\n except (httpx.ConnectError, httpx.RequestError) as exc:\n return StatusResponse(f\"Unable to connect to {self.endpoint}. 
{exc}\", 0)\n\n try:\n data = r.json()\n if r.is_error:\n raise Exception\n except Exception:\n return StatusResponse(r.text[:200], 0)\n\n return StatusResponse(None, int(data[\"balance\"]) * 1000)\n\n async def create_invoice(\n self,\n amount: int,\n memo: Optional[str] = None,\n description_hash: Optional[bytes] = None,\n unhashed_description: Optional[bytes] = None,\n **kwargs,\n ) -> InvoiceResponse:\n data: Dict = {\"value\": amount, \"private\": True, \"memo\": memo or \"\"}\n if kwargs.get(\"expiry\"):\n data[\"expiry\"] = kwargs[\"expiry\"]\n if description_hash:\n data[\"description_hash\"] = base64.b64encode(description_hash).decode(\n \"ascii\"\n )\n elif unhashed_description:\n data[\"description_hash\"] = base64.b64encode(\n hashlib.sha256(unhashed_description).digest()\n ).decode(\"ascii\")\n\n r = await self.client.post(url=\"/v1/invoices\", json=data)\n\n if r.is_error:\n error_message = r.text\n try:\n error_message = r.json()[\"error\"]\n except Exception:\n pass\n return InvoiceResponse(False, None, None, error_message)\n\n data = r.json()\n payment_request = data[\"payment_request\"]\n payment_hash = base64.b64decode(data[\"r_hash\"]).hex()\n checking_id = payment_hash\n\n return InvoiceResponse(True, checking_id, payment_request, None)\n\n async def pay_invoice(self, bolt11: str, fee_limit_msat: int) -> PaymentResponse:\n # set the fee limit for the payment\n lnrpcFeeLimit = dict()\n lnrpcFeeLimit[\"fixed_msat\"] = f\"{fee_limit_msat}\"\n\n r = await self.client.post(\n url=\"/v1/channels/transactions\",\n json={\"payment_request\": bolt11, \"fee_limit\": lnrpcFeeLimit},\n timeout=None,\n )\n\n if r.is_error or r.json().get(\"payment_error\"):\n error_message = r.json().get(\"payment_error\") or r.text\n return PaymentResponse(False, None, None, None, error_message)\n\n data = r.json()\n checking_id = base64.b64decode(data[\"payment_hash\"]).hex()\n fee_msat = int(data[\"payment_route\"][\"total_fees_msat\"])\n preimage = base64.b64decode(data[\"payment_preimage\"]).hex()\n return PaymentResponse(True, checking_id, fee_msat, preimage, None)\n\n async def get_invoice_status(self, checking_id: str) -> PaymentStatus:\n r = await self.client.get(url=f\"/v1/invoice/{checking_id}\")\n\n if r.is_error or not r.json().get(\"settled\"):\n # this must also work when checking_id is not a hex recognizable by lnd\n # it will return an error and no \"settled\" attribute on the object\n return PaymentStatus(None)\n\n return PaymentStatus(True)\n\n async def get_payment_status(self, checking_id: str) -> PaymentStatus:\n \"\"\"\n This routine checks the payment status using routerpc.TrackPaymentV2.\n \"\"\"\n # convert checking_id from hex to base64 and some LND magic\n try:\n checking_id = base64.urlsafe_b64encode(bytes.fromhex(checking_id)).decode(\n \"ascii\"\n )\n except ValueError:\n return PaymentStatus(None)\n\n url = f\"/v2/router/track/{checking_id}\"\n\n # check payment.status:\n # https://api.lightning.community/?python=#paymentpaymentstatus\n statuses = {\n \"UNKNOWN\": None,\n \"IN_FLIGHT\": None,\n \"SUCCEEDED\": True,\n \"FAILED\": False,\n }\n\n async with self.client.stream(\"GET\", url, timeout=None) as r:\n async for json_line in r.aiter_lines():\n try:\n line = json.loads(json_line)\n if line.get(\"error\"):\n logger.error(\n line[\"error\"][\"message\"]\n if \"message\" in line[\"error\"]\n else line[\"error\"]\n )\n return PaymentStatus(None)\n payment = line.get(\"result\")\n if payment is not None and payment.get(\"status\"):\n return PaymentStatus(\n 
paid=statuses[payment[\"status\"]],\n fee_msat=payment.get(\"fee_msat\"),\n preimage=payment.get(\"payment_preimage\"),\n )\n else:\n return PaymentStatus(None)\n except Exception:\n continue\n\n return PaymentStatus(None)\n\n async def paid_invoices_stream(self) -> AsyncGenerator[str, None]:\n while True:\n try:\n url = \"/v1/invoices/subscribe\"\n async with self.client.stream(\"GET\", url, timeout=None) as r:\n async for line in r.aiter_lines():\n try:\n inv = json.loads(line)[\"result\"]\n if not inv[\"settled\"]:\n continue\n except Exception:\n continue\n\n payment_hash = base64.b64decode(inv[\"r_hash\"]).hex()\n yield payment_hash\n except Exception as exc:\n logger.error(\n f\"lost connection to lnd invoices stream: '{exc}', retrying in 5\"\n \" seconds\"\n )\n await asyncio.sleep(5)\n", "path": "lnbits/wallets/lndrest.py"}]} | 2,951 | 159 |
gh_patches_debug_21868 | rasdani/github-patches | git_diff | streamlink__streamlink-4885 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
plugins.btv: No playable streams found
### Checklist
- [X] This is a plugin issue and not a different kind of issue
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)
- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Streamlink version
Latest stable release
### Description
The plugin is not functional. I am attaching a log.
### Debug log
```text
streamlink --loglevel debug "https://btvplus.bg/live/" best
[cli][debug] OS: Linux-5.15.0-50-generic-x86_64-with-glibc2.29
[cli][debug] Python: 3.8.10
[cli][debug] Streamlink: 5.0.1
[cli][debug] Dependencies:
[cli][debug] isodate: 0.6.0
[cli][debug] lxml: 4.6.4
[cli][debug] pycountry: 19.8.18
[cli][debug] pycryptodome: 3.9.9
[cli][debug] PySocks: 1.7.1
[cli][debug] requests: 2.26.0
[cli][debug] websocket-client: 1.2.1
[cli][debug] Arguments:
[cli][debug] url=https://btvplus.bg/live/
[cli][debug] stream=['best']
[cli][debug] --loglevel=debug
[cli][info] Found matching plugin btv for URL https://btvplus.bg/live/
[utils.l10n][debug] Language code: bg_BG
error: No playable streams found on this URL: https://btvplus.bg/live/
```
</issue>
<code>
[start of src/streamlink/plugins/btv.py]
1 """
2 $description A privately owned Bulgarian live TV channel.
3 $url btvplus.bg
4 $type live
5 $region Bulgaria
6 """
7
8 import logging
9 import re
10
11 from streamlink.plugin import Plugin, pluginmatcher
12 from streamlink.plugin.api import validate
13 from streamlink.stream.hls import HLSStream
14
15 log = logging.getLogger(__name__)
16
17
18 @pluginmatcher(re.compile(
19 r"https?://(?:www\.)?btvplus\.bg/live/?"
20 ))
21 class BTV(Plugin):
22 URL_API = "https://btvplus.bg/lbin/v3/btvplus/player_config.php"
23
24 def _get_streams(self):
25 media_id = self.session.http.get(self.url, schema=validate.Schema(
26 re.compile(r"media_id=(\d+)"),
27 validate.any(None, validate.get(1)),
28 ))
29 if media_id is None:
30 return
31
32 stream_url = self.session.http.get(
33 self.URL_API,
34 params={
35 "media_id": media_id,
36 },
37 schema=validate.Schema(
38 validate.any(
39 validate.all(
40 validate.regex(re.compile(r"geo_blocked_stream")),
41 validate.get(0),
42 ),
43 validate.all(
44 validate.parse_json(),
45 {
46 "status": "ok",
47 "config": str,
48 },
49 validate.get("config"),
50 re.compile(r"src: \"(http.*?)\""),
51 validate.none_or_all(
52 validate.get(1),
53 validate.url(),
54 ),
55 ),
56 ),
57 ),
58 )
59 if not stream_url:
60 return
61
62 if stream_url == "geo_blocked_stream":
63 log.error("The content is not available in your region")
64 return
65
66 return HLSStream.parse_variant_playlist(self.session, stream_url)
67
68
69 __plugin__ = BTV
70
[end of src/streamlink/plugins/btv.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/streamlink/plugins/btv.py b/src/streamlink/plugins/btv.py
--- a/src/streamlink/plugins/btv.py
+++ b/src/streamlink/plugins/btv.py
@@ -44,14 +44,11 @@
validate.parse_json(),
{
"status": "ok",
- "config": str,
+ "info": {
+ "file": validate.url(path=validate.endswith(".m3u8")),
+ },
},
- validate.get("config"),
- re.compile(r"src: \"(http.*?)\""),
- validate.none_or_all(
- validate.get(1),
- validate.url(),
- ),
+ validate.get(("info", "file")),
),
),
),
@@ -63,7 +60,7 @@
log.error("The content is not available in your region")
return
- return HLSStream.parse_variant_playlist(self.session, stream_url)
+ return {"live": HLSStream(self.session, stream_url)}
__plugin__ = BTV
| {"golden_diff": "diff --git a/src/streamlink/plugins/btv.py b/src/streamlink/plugins/btv.py\n--- a/src/streamlink/plugins/btv.py\n+++ b/src/streamlink/plugins/btv.py\n@@ -44,14 +44,11 @@\n validate.parse_json(),\n {\n \"status\": \"ok\",\n- \"config\": str,\n+ \"info\": {\n+ \"file\": validate.url(path=validate.endswith(\".m3u8\")),\n+ },\n },\n- validate.get(\"config\"),\n- re.compile(r\"src: \\\"(http.*?)\\\"\"),\n- validate.none_or_all(\n- validate.get(1),\n- validate.url(),\n- ),\n+ validate.get((\"info\", \"file\")),\n ),\n ),\n ),\n@@ -63,7 +60,7 @@\n log.error(\"The content is not available in your region\")\n return\n \n- return HLSStream.parse_variant_playlist(self.session, stream_url)\n+ return {\"live\": HLSStream(self.session, stream_url)}\n \n \n __plugin__ = BTV\n", "issue": "plugins.btv: No playable streams found\n### Checklist\n\n- [X] This is a plugin issue and not a different kind of issue\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\n\n### Streamlink version\n\nLatest stable release\n\n### Description\n\nThe plugin is not functional. I am attaching a log.\n\n### Debug log\n\n```text\nstreamlink --loglevel debug \"https://btvplus.bg/live/\" best\r\n[cli][debug] OS: Linux-5.15.0-50-generic-x86_64-with-glibc2.29\r\n[cli][debug] Python: 3.8.10\r\n[cli][debug] Streamlink: 5.0.1\r\n[cli][debug] Dependencies:\r\n[cli][debug] isodate: 0.6.0\r\n[cli][debug] lxml: 4.6.4\r\n[cli][debug] pycountry: 19.8.18\r\n[cli][debug] pycryptodome: 3.9.9\r\n[cli][debug] PySocks: 1.7.1\r\n[cli][debug] requests: 2.26.0\r\n[cli][debug] websocket-client: 1.2.1\r\n[cli][debug] Arguments:\r\n[cli][debug] url=https://btvplus.bg/live/\r\n[cli][debug] stream=['best']\r\n[cli][debug] --loglevel=debug\r\n[cli][info] Found matching plugin btv for URL https://btvplus.bg/live/\r\n[utils.l10n][debug] Language code: bg_BG\r\nerror: No playable streams found on this URL: https://btvplus.bg/live/\n```\n\n", "before_files": [{"content": "\"\"\"\n$description A privately owned Bulgarian live TV channel.\n$url btvplus.bg\n$type live\n$region Bulgaria\n\"\"\"\n\nimport logging\nimport re\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.hls import HLSStream\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r\"https?://(?:www\\.)?btvplus\\.bg/live/?\"\n))\nclass BTV(Plugin):\n URL_API = \"https://btvplus.bg/lbin/v3/btvplus/player_config.php\"\n\n def _get_streams(self):\n media_id = self.session.http.get(self.url, schema=validate.Schema(\n re.compile(r\"media_id=(\\d+)\"),\n validate.any(None, validate.get(1)),\n ))\n if media_id is None:\n return\n\n stream_url = self.session.http.get(\n self.URL_API,\n params={\n \"media_id\": media_id,\n },\n schema=validate.Schema(\n validate.any(\n validate.all(\n validate.regex(re.compile(r\"geo_blocked_stream\")),\n validate.get(0),\n ),\n validate.all(\n validate.parse_json(),\n {\n \"status\": \"ok\",\n \"config\": str,\n },\n validate.get(\"config\"),\n re.compile(r\"src: \\\"(http.*?)\\\"\"),\n validate.none_or_all(\n validate.get(1),\n validate.url(),\n ),\n ),\n ),\n ),\n )\n if not stream_url:\n 
return\n\n if stream_url == \"geo_blocked_stream\":\n log.error(\"The content is not available in your region\")\n return\n\n return HLSStream.parse_variant_playlist(self.session, stream_url)\n\n\n__plugin__ = BTV\n", "path": "src/streamlink/plugins/btv.py"}]} | 1,523 | 226 |
gh_patches_debug_1546 | rasdani/github-patches | git_diff | lightly-ai__lightly-1450 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
VICReg Loss De-Means Twice?
https://github.com/lightly-ai/lightly/blob/66ad1b40ebf3b53512703c774988211ce283211f/lightly/loss/vicreg_loss.py#L128-L129
I think the VICReg loss removes the mean, then calls `.var()` which also de-means (see: https://pytorch.org/docs/stable/generated/torch.var.html).
If I understand correctly, that seems unnecessary?
</issue>
<code>
[start of lightly/loss/vicreg_loss.py]
1 import torch
2 import torch.distributed as dist
3 import torch.nn.functional as F
4 from torch import Tensor
5
6 from lightly.utils.dist import gather
7
8
9 class VICRegLoss(torch.nn.Module):
10 """Implementation of the VICReg loss [0].
11
12 This implementation is based on the code published by the authors [1].
13
14 - [0] VICReg, 2022, https://arxiv.org/abs/2105.04906
15 - [1] https://github.com/facebookresearch/vicreg/
16
17 Attributes:
18 lambda_param:
19 Scaling coefficient for the invariance term of the loss.
20 mu_param:
21 Scaling coefficient for the variance term of the loss.
22 nu_param:
23 Scaling coefficient for the covariance term of the loss.
24 gather_distributed:
25 If True then the cross-correlation matrices from all gpus are gathered and
26 summed before the loss calculation.
27 eps:
28 Epsilon for numerical stability.
29
30 Examples:
31
32 >>> # initialize loss function
33 >>> loss_fn = VICRegLoss()
34 >>>
35 >>> # generate two random transforms of images
36 >>> t0 = transforms(images)
37 >>> t1 = transforms(images)
38 >>>
39 >>> # feed through model
40 >>> out0, out1 = model(t0, t1)
41 >>>
42 >>> # calculate loss
43 >>> loss = loss_fn(out0, out1)
44 """
45
46 def __init__(
47 self,
48 lambda_param: float = 25.0,
49 mu_param: float = 25.0,
50 nu_param: float = 1.0,
51 gather_distributed: bool = False,
52 eps=0.0001,
53 ):
54 super(VICRegLoss, self).__init__()
55 if gather_distributed and not dist.is_available():
56 raise ValueError(
57 "gather_distributed is True but torch.distributed is not available. "
58 "Please set gather_distributed=False or install a torch version with "
59 "distributed support."
60 )
61
62 self.lambda_param = lambda_param
63 self.mu_param = mu_param
64 self.nu_param = nu_param
65 self.gather_distributed = gather_distributed
66 self.eps = eps
67
68 def forward(self, z_a: torch.Tensor, z_b: torch.Tensor) -> torch.Tensor:
69 """Returns VICReg loss.
70
71 Args:
72 z_a:
73 Tensor with shape (batch_size, ..., dim).
74 z_b:
75 Tensor with shape (batch_size, ..., dim).
76 """
77 assert (
78 z_a.shape[0] > 1 and z_b.shape[0] > 1
79 ), f"z_a and z_b must have batch size > 1 but found {z_a.shape[0]} and {z_b.shape[0]}"
80 assert (
81 z_a.shape == z_b.shape
82 ), f"z_a and z_b must have same shape but found {z_a.shape} and {z_b.shape}."
83
84 # invariance term of the loss
85 inv_loss = invariance_loss(x=z_a, y=z_b)
86
87 # gather all batches
88 if self.gather_distributed and dist.is_initialized():
89 world_size = dist.get_world_size()
90 if world_size > 1:
91 z_a = torch.cat(gather(z_a), dim=0)
92 z_b = torch.cat(gather(z_b), dim=0)
93
94 var_loss = 0.5 * (
95 variance_loss(x=z_a, eps=self.eps) + variance_loss(x=z_b, eps=self.eps)
96 )
97 cov_loss = covariance_loss(x=z_a) + covariance_loss(x=z_b)
98
99 loss = (
100 self.lambda_param * inv_loss
101 + self.mu_param * var_loss
102 + self.nu_param * cov_loss
103 )
104 return loss
105
106
107 def invariance_loss(x: Tensor, y: Tensor) -> Tensor:
108 """Returns VICReg invariance loss.
109
110 Args:
111 x:
112 Tensor with shape (batch_size, ..., dim).
113 y:
114 Tensor with shape (batch_size, ..., dim).
115 """
116 return F.mse_loss(x, y)
117
118
119 def variance_loss(x: Tensor, eps: float = 0.0001) -> Tensor:
120 """Returns VICReg variance loss.
121
122 Args:
123 x:
124 Tensor with shape (batch_size, ..., dim).
125 eps:
126 Epsilon for numerical stability.
127 """
128 x = x - x.mean(dim=0)
129 std = torch.sqrt(x.var(dim=0) + eps)
130 loss = torch.mean(F.relu(1.0 - std))
131 return loss
132
133
134 def covariance_loss(x: Tensor) -> Tensor:
135 """Returns VICReg covariance loss.
136
137 Generalized version of the covariance loss with support for tensors with more than
138 two dimensions. Adapted from VICRegL:
139 https://github.com/facebookresearch/VICRegL/blob/803ae4c8cd1649a820f03afb4793763e95317620/main_vicregl.py#L299
140
141 Args:
142 x:
143 Tensor with shape (batch_size, ..., dim).
144 """
145 x = x - x.mean(dim=0)
146 batch_size = x.size(0)
147 dim = x.size(-1)
148 # nondiag_mask has shape (dim, dim) with 1s on all non-diagonal entries.
149 nondiag_mask = ~torch.eye(dim, device=x.device, dtype=torch.bool)
150 # cov has shape (..., dim, dim)
151 cov = torch.einsum("b...c,b...d->...cd", x, x) / (batch_size - 1)
152 loss = cov[..., nondiag_mask].pow(2).sum(-1) / dim
153 return loss.mean()
154
[end of lightly/loss/vicreg_loss.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lightly/loss/vicreg_loss.py b/lightly/loss/vicreg_loss.py
--- a/lightly/loss/vicreg_loss.py
+++ b/lightly/loss/vicreg_loss.py
@@ -125,7 +125,6 @@
eps:
Epsilon for numerical stability.
"""
- x = x - x.mean(dim=0)
std = torch.sqrt(x.var(dim=0) + eps)
loss = torch.mean(F.relu(1.0 - std))
return loss
| {"golden_diff": "diff --git a/lightly/loss/vicreg_loss.py b/lightly/loss/vicreg_loss.py\n--- a/lightly/loss/vicreg_loss.py\n+++ b/lightly/loss/vicreg_loss.py\n@@ -125,7 +125,6 @@\n eps:\n Epsilon for numerical stability.\n \"\"\"\n- x = x - x.mean(dim=0)\n std = torch.sqrt(x.var(dim=0) + eps)\n loss = torch.mean(F.relu(1.0 - std))\n return loss\n", "issue": "VICReg Loss De-Means Twice?\nhttps://github.com/lightly-ai/lightly/blob/66ad1b40ebf3b53512703c774988211ce283211f/lightly/loss/vicreg_loss.py#L128-L129\r\n\r\nI think the VICReg loss removes the mean, then calls `.var()` which also de-means (see: https://pytorch.org/docs/stable/generated/torch.var.html). \r\n\r\nIf I understand correctly, that seems unnecessary?\n", "before_files": [{"content": "import torch\nimport torch.distributed as dist\nimport torch.nn.functional as F\nfrom torch import Tensor\n\nfrom lightly.utils.dist import gather\n\n\nclass VICRegLoss(torch.nn.Module):\n \"\"\"Implementation of the VICReg loss [0].\n\n This implementation is based on the code published by the authors [1].\n\n - [0] VICReg, 2022, https://arxiv.org/abs/2105.04906\n - [1] https://github.com/facebookresearch/vicreg/\n\n Attributes:\n lambda_param:\n Scaling coefficient for the invariance term of the loss.\n mu_param:\n Scaling coefficient for the variance term of the loss.\n nu_param:\n Scaling coefficient for the covariance term of the loss.\n gather_distributed:\n If True then the cross-correlation matrices from all gpus are gathered and\n summed before the loss calculation.\n eps:\n Epsilon for numerical stability.\n\n Examples:\n\n >>> # initialize loss function\n >>> loss_fn = VICRegLoss()\n >>>\n >>> # generate two random transforms of images\n >>> t0 = transforms(images)\n >>> t1 = transforms(images)\n >>>\n >>> # feed through model\n >>> out0, out1 = model(t0, t1)\n >>>\n >>> # calculate loss\n >>> loss = loss_fn(out0, out1)\n \"\"\"\n\n def __init__(\n self,\n lambda_param: float = 25.0,\n mu_param: float = 25.0,\n nu_param: float = 1.0,\n gather_distributed: bool = False,\n eps=0.0001,\n ):\n super(VICRegLoss, self).__init__()\n if gather_distributed and not dist.is_available():\n raise ValueError(\n \"gather_distributed is True but torch.distributed is not available. 
\"\n \"Please set gather_distributed=False or install a torch version with \"\n \"distributed support.\"\n )\n\n self.lambda_param = lambda_param\n self.mu_param = mu_param\n self.nu_param = nu_param\n self.gather_distributed = gather_distributed\n self.eps = eps\n\n def forward(self, z_a: torch.Tensor, z_b: torch.Tensor) -> torch.Tensor:\n \"\"\"Returns VICReg loss.\n\n Args:\n z_a:\n Tensor with shape (batch_size, ..., dim).\n z_b:\n Tensor with shape (batch_size, ..., dim).\n \"\"\"\n assert (\n z_a.shape[0] > 1 and z_b.shape[0] > 1\n ), f\"z_a and z_b must have batch size > 1 but found {z_a.shape[0]} and {z_b.shape[0]}\"\n assert (\n z_a.shape == z_b.shape\n ), f\"z_a and z_b must have same shape but found {z_a.shape} and {z_b.shape}.\"\n\n # invariance term of the loss\n inv_loss = invariance_loss(x=z_a, y=z_b)\n\n # gather all batches\n if self.gather_distributed and dist.is_initialized():\n world_size = dist.get_world_size()\n if world_size > 1:\n z_a = torch.cat(gather(z_a), dim=0)\n z_b = torch.cat(gather(z_b), dim=0)\n\n var_loss = 0.5 * (\n variance_loss(x=z_a, eps=self.eps) + variance_loss(x=z_b, eps=self.eps)\n )\n cov_loss = covariance_loss(x=z_a) + covariance_loss(x=z_b)\n\n loss = (\n self.lambda_param * inv_loss\n + self.mu_param * var_loss\n + self.nu_param * cov_loss\n )\n return loss\n\n\ndef invariance_loss(x: Tensor, y: Tensor) -> Tensor:\n \"\"\"Returns VICReg invariance loss.\n\n Args:\n x:\n Tensor with shape (batch_size, ..., dim).\n y:\n Tensor with shape (batch_size, ..., dim).\n \"\"\"\n return F.mse_loss(x, y)\n\n\ndef variance_loss(x: Tensor, eps: float = 0.0001) -> Tensor:\n \"\"\"Returns VICReg variance loss.\n\n Args:\n x:\n Tensor with shape (batch_size, ..., dim).\n eps:\n Epsilon for numerical stability.\n \"\"\"\n x = x - x.mean(dim=0)\n std = torch.sqrt(x.var(dim=0) + eps)\n loss = torch.mean(F.relu(1.0 - std))\n return loss\n\n\ndef covariance_loss(x: Tensor) -> Tensor:\n \"\"\"Returns VICReg covariance loss.\n\n Generalized version of the covariance loss with support for tensors with more than\n two dimensions. Adapted from VICRegL:\n https://github.com/facebookresearch/VICRegL/blob/803ae4c8cd1649a820f03afb4793763e95317620/main_vicregl.py#L299\n\n Args:\n x:\n Tensor with shape (batch_size, ..., dim).\n \"\"\"\n x = x - x.mean(dim=0)\n batch_size = x.size(0)\n dim = x.size(-1)\n # nondiag_mask has shape (dim, dim) with 1s on all non-diagonal entries.\n nondiag_mask = ~torch.eye(dim, device=x.device, dtype=torch.bool)\n # cov has shape (..., dim, dim)\n cov = torch.einsum(\"b...c,b...d->...cd\", x, x) / (batch_size - 1)\n loss = cov[..., nondiag_mask].pow(2).sum(-1) / dim\n return loss.mean()\n", "path": "lightly/loss/vicreg_loss.py"}]} | 2,283 | 118 |
gh_patches_debug_6249 | rasdani/github-patches | git_diff | microsoft__ptvsd-641 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Errors logged on VSTS
```
2018-07-05T19:13:17.5780150Z .Traceback (most recent call last):
2018-07-05T19:13:17.5795340Z File "/Users/vsts/agent/2.134.2/work/1/s/ptvsd/_vendored/pydevd/_pydevd_bundle/pydevd_process_net_command.py", line 749, in process_net_command
2018-07-05T19:13:17.5813150Z py_db.enable_output_redirection('STDOUT' in text, 'STDERR' in text)
2018-07-05T19:13:17.5831030Z File "/Users/vsts/agent/2.134.2/work/1/s/ptvsd/_vendored/pydevd/pydevd.py", line 361, in enable_output_redirection
2018-07-05T19:13:17.5847040Z init_stdout_redirect()
2018-07-05T19:13:17.5862230Z File "/Users/vsts/agent/2.134.2/work/1/s/ptvsd/_vendored/pydevd/pydevd.py", line 1199, in init_stdout_redirect
2018-07-05T19:13:17.5878570Z sys.stdout = pydevd_io.IORedirector(original, sys._pydevd_out_buffer_, wrap_buffer) #@UndefinedVariable
2018-07-05T19:13:17.5895080Z File "/Users/vsts/agent/2.134.2/work/1/s/ptvsd/_vendored/pydevd/_pydevd_bundle/pydevd_io.py", line 24, in __init__
2018-07-05T19:13:17.5913010Z self.buffer = IORedirector(original.buffer, new_redirect.buffer, False)
2018-07-05T19:13:17.5939400Z AttributeError: '_DuplicateWriter' object has no attribute 'buffer'
```
The same errors are logged for Linux and Mac OS.
I'm using a Mac and cannot replicate this error.
/cc @fabioz
</issue>
<code>
[start of ptvsd/_vendored/pydevd/_pydevd_bundle/pydevd_io.py]
1 from _pydevd_bundle import pydevd_constants
2
3 IS_PY3K = pydevd_constants.IS_PY3K
4
5 class IORedirector:
6 '''
7 This class works to wrap a stream (stdout/stderr) with an additional redirect.
8 '''
9
10 def __init__(self, original, new_redirect, wrap_buffer=False):
11 '''
12 :param stream original:
13 The stream to be wrapped (usually stdout/stderr).
14
15 :param stream new_redirect:
16 Usually IOBuf (below).
17
18 :param bool wrap_buffer:
19 Whether to create a buffer attribute (needed to mimick python 3 s
20 tdout/stderr which has a buffer to write binary data).
21 '''
22 self._redirect_to = (original, new_redirect)
23 if wrap_buffer:
24 self.buffer = IORedirector(original.buffer, new_redirect.buffer, False)
25
26 def write(self, s):
27 # Note that writing to the original stream may fail for some reasons
28 # (such as trying to write something that's not a string or having it closed).
29 for r in self._redirect_to:
30 r.write(s)
31
32 def isatty(self):
33 return self._redirect_to[0].isatty()
34
35 def flush(self):
36 for r in self._redirect_to:
37 r.flush()
38
39 def __getattr__(self, name):
40 for r in self._redirect_to:
41 if hasattr(r, name):
42 return getattr(r, name)
43 raise AttributeError(name)
44
45 class IOBuf:
46 '''This class works as a replacement for stdio and stderr.
47 It is a buffer and when its contents are requested, it will erase what
48 it has so far so that the next return will not return the same contents again.
49 '''
50 def __init__(self):
51 self.buflist = []
52 import os
53 self.encoding = os.environ.get('PYTHONIOENCODING', 'utf-8')
54
55 def getvalue(self):
56 b = self.buflist
57 self.buflist = [] # clear it
58 return ''.join(b) # bytes on py2, str on py3.
59
60 def write(self, s):
61 if not IS_PY3K:
62 if isinstance(s, unicode):
63 # can't use 'errors' as kwargs in py 2.6
64 s = s.encode(self.encoding, 'replace')
65 else:
66 if isinstance(s, bytes):
67 s = s.decode(self.encoding, errors='replace')
68 self.buflist.append(s)
69
70 def isatty(self):
71 return False
72
73 def flush(self):
74 pass
75
76 def empty(self):
77 return len(self.buflist) == 0
78
79 class _RedirectionsHolder:
80 _stack_stdout = []
81 _stack_stderr = []
82
83
84 def start_redirect(keep_original_redirection=False, std='stdout'):
85 '''
86 @param std: 'stdout', 'stderr', or 'both'
87 '''
88 import sys
89 buf = IOBuf()
90
91 if std == 'both':
92 config_stds = ['stdout', 'stderr']
93 else:
94 config_stds = [std]
95
96 for std in config_stds:
97 original = getattr(sys, std)
98 stack = getattr(_RedirectionsHolder, '_stack_%s' % std)
99 stack.append(original)
100
101 if keep_original_redirection:
102 setattr(sys, std, IORedirector(getattr(sys, std), buf))
103 else:
104 setattr(sys, std, buf)
105 return buf
106
107
108 def end_redirect(std='stdout'):
109 import sys
110 if std == 'both':
111 config_stds = ['stdout', 'stderr']
112 else:
113 config_stds = [std]
114 for std in config_stds:
115 stack = getattr(_RedirectionsHolder, '_stack_%s' % std)
116 setattr(sys, std, stack.pop())
117
118
[end of ptvsd/_vendored/pydevd/_pydevd_bundle/pydevd_io.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ptvsd/_vendored/pydevd/_pydevd_bundle/pydevd_io.py b/ptvsd/_vendored/pydevd/_pydevd_bundle/pydevd_io.py
--- a/ptvsd/_vendored/pydevd/_pydevd_bundle/pydevd_io.py
+++ b/ptvsd/_vendored/pydevd/_pydevd_bundle/pydevd_io.py
@@ -20,7 +20,7 @@
tdout/stderr which has a buffer to write binary data).
'''
self._redirect_to = (original, new_redirect)
- if wrap_buffer:
+ if wrap_buffer and hasattr(original, 'buffer'):
self.buffer = IORedirector(original.buffer, new_redirect.buffer, False)
def write(self, s):
| {"golden_diff": "diff --git a/ptvsd/_vendored/pydevd/_pydevd_bundle/pydevd_io.py b/ptvsd/_vendored/pydevd/_pydevd_bundle/pydevd_io.py\n--- a/ptvsd/_vendored/pydevd/_pydevd_bundle/pydevd_io.py\n+++ b/ptvsd/_vendored/pydevd/_pydevd_bundle/pydevd_io.py\n@@ -20,7 +20,7 @@\n tdout/stderr which has a buffer to write binary data).\n '''\n self._redirect_to = (original, new_redirect)\n- if wrap_buffer:\n+ if wrap_buffer and hasattr(original, 'buffer'):\n self.buffer = IORedirector(original.buffer, new_redirect.buffer, False)\n \n def write(self, s):\n", "issue": "Errors logged on VSTS \n```\r\n2018-07-05T19:13:17.5780150Z .Traceback (most recent call last):\r\n2018-07-05T19:13:17.5795340Z File \"/Users/vsts/agent/2.134.2/work/1/s/ptvsd/_vendored/pydevd/_pydevd_bundle/pydevd_process_net_command.py\", line 749, in process_net_command\r\n2018-07-05T19:13:17.5813150Z py_db.enable_output_redirection('STDOUT' in text, 'STDERR' in text)\r\n2018-07-05T19:13:17.5831030Z File \"/Users/vsts/agent/2.134.2/work/1/s/ptvsd/_vendored/pydevd/pydevd.py\", line 361, in enable_output_redirection\r\n2018-07-05T19:13:17.5847040Z init_stdout_redirect()\r\n2018-07-05T19:13:17.5862230Z File \"/Users/vsts/agent/2.134.2/work/1/s/ptvsd/_vendored/pydevd/pydevd.py\", line 1199, in init_stdout_redirect\r\n2018-07-05T19:13:17.5878570Z sys.stdout = pydevd_io.IORedirector(original, sys._pydevd_out_buffer_, wrap_buffer) #@UndefinedVariable\r\n2018-07-05T19:13:17.5895080Z File \"/Users/vsts/agent/2.134.2/work/1/s/ptvsd/_vendored/pydevd/_pydevd_bundle/pydevd_io.py\", line 24, in __init__\r\n2018-07-05T19:13:17.5913010Z self.buffer = IORedirector(original.buffer, new_redirect.buffer, False)\r\n2018-07-05T19:13:17.5939400Z AttributeError: '_DuplicateWriter' object has no attribute 'buffer'\r\n```\r\n\r\nThe same errors are logged for Linux and Mac OS.\r\nI'm using a Mac and cannot replicate this error.\r\n\r\n/cc @fabioz \n", "before_files": [{"content": "from _pydevd_bundle import pydevd_constants\n\nIS_PY3K = pydevd_constants.IS_PY3K\n\nclass IORedirector:\n '''\n This class works to wrap a stream (stdout/stderr) with an additional redirect.\n '''\n\n def __init__(self, original, new_redirect, wrap_buffer=False):\n '''\n :param stream original:\n The stream to be wrapped (usually stdout/stderr).\n\n :param stream new_redirect:\n Usually IOBuf (below).\n\n :param bool wrap_buffer:\n Whether to create a buffer attribute (needed to mimick python 3 s\n tdout/stderr which has a buffer to write binary data).\n '''\n self._redirect_to = (original, new_redirect)\n if wrap_buffer:\n self.buffer = IORedirector(original.buffer, new_redirect.buffer, False)\n\n def write(self, s):\n # Note that writing to the original stream may fail for some reasons\n # (such as trying to write something that's not a string or having it closed).\n for r in self._redirect_to:\n r.write(s)\n\n def isatty(self):\n return self._redirect_to[0].isatty()\n\n def flush(self):\n for r in self._redirect_to:\n r.flush()\n\n def __getattr__(self, name):\n for r in self._redirect_to:\n if hasattr(r, name):\n return getattr(r, name)\n raise AttributeError(name)\n\nclass IOBuf:\n '''This class works as a replacement for stdio and stderr.\n It is a buffer and when its contents are requested, it will erase what\n it has so far so that the next return will not return the same contents again.\n '''\n def __init__(self):\n self.buflist = []\n import os\n self.encoding = os.environ.get('PYTHONIOENCODING', 'utf-8')\n\n def getvalue(self):\n b = self.buflist\n self.buflist = [] # 
clear it\n return ''.join(b) # bytes on py2, str on py3.\n \n def write(self, s):\n if not IS_PY3K:\n if isinstance(s, unicode):\n # can't use 'errors' as kwargs in py 2.6\n s = s.encode(self.encoding, 'replace')\n else:\n if isinstance(s, bytes):\n s = s.decode(self.encoding, errors='replace')\n self.buflist.append(s)\n\n def isatty(self):\n return False\n\n def flush(self):\n pass\n\n def empty(self):\n return len(self.buflist) == 0\n\nclass _RedirectionsHolder:\n _stack_stdout = []\n _stack_stderr = []\n\n\ndef start_redirect(keep_original_redirection=False, std='stdout'):\n '''\n @param std: 'stdout', 'stderr', or 'both'\n '''\n import sys\n buf = IOBuf()\n\n if std == 'both':\n config_stds = ['stdout', 'stderr']\n else:\n config_stds = [std]\n\n for std in config_stds:\n original = getattr(sys, std)\n stack = getattr(_RedirectionsHolder, '_stack_%s' % std)\n stack.append(original)\n\n if keep_original_redirection:\n setattr(sys, std, IORedirector(getattr(sys, std), buf))\n else:\n setattr(sys, std, buf)\n return buf\n\n\ndef end_redirect(std='stdout'):\n import sys\n if std == 'both':\n config_stds = ['stdout', 'stderr']\n else:\n config_stds = [std]\n for std in config_stds:\n stack = getattr(_RedirectionsHolder, '_stack_%s' % std)\n setattr(sys, std, stack.pop())\n\n", "path": "ptvsd/_vendored/pydevd/_pydevd_bundle/pydevd_io.py"}]} | 2,255 | 179 |
gh_patches_debug_32008 | rasdani/github-patches | git_diff | microsoft__AzureTRE-1039 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
API app not reporting requests to AppInsights
**Description**
Ensure opencensus reports http requests to app insights.
</issue>
<code>
[start of api_app/main.py]
1 import logging
2 import uvicorn
3
4 from fastapi import FastAPI
5 from fastapi.exceptions import RequestValidationError
6 from fastapi_utils.tasks import repeat_every
7 from starlette.exceptions import HTTPException
8 from starlette.middleware.errors import ServerErrorMiddleware
9
10 from api.routes.api import router as api_router
11 from api.routes.api import tags_metadata
12 from api.errors.http_error import http_error_handler
13 from api.errors.validation_error import http422_error_handler
14 from api.errors.generic_error import generic_error_handler
15 from core import config
16 from core.events import create_start_app_handler, create_stop_app_handler
17 from services.logging import disable_unwanted_loggers, initialize_logging
18 from service_bus.deployment_status_update import receive_message_and_update_deployment
19
20
21 def get_application() -> FastAPI:
22 application = FastAPI(
23 title=config.PROJECT_NAME,
24 debug=config.DEBUG,
25 description=config.API_DESCRIPTION,
26 version=config.VERSION,
27 docs_url="/api/docs",
28 swagger_ui_oauth2_redirect_url="/api/docs/oauth2-redirect",
29 swagger_ui_init_oauth={
30 "usePkceWithAuthorizationCodeGrant": True,
31 "clientId": config.SWAGGER_UI_CLIENT_ID,
32 "scopes": ["openid", "offline_access", f"api://{config.API_CLIENT_ID}/Workspace.Read", f"api://{config.API_CLIENT_ID}/Workspace.Write"]
33 },
34 openapi_tags=tags_metadata
35 )
36
37 application.add_event_handler("startup", create_start_app_handler(application))
38 application.add_event_handler("shutdown", create_stop_app_handler(application))
39
40 application.add_middleware(ServerErrorMiddleware, handler=generic_error_handler)
41 application.add_exception_handler(HTTPException, http_error_handler)
42 application.add_exception_handler(RequestValidationError, http422_error_handler)
43
44 application.include_router(api_router, prefix=config.API_PREFIX)
45 return application
46
47
48 app = get_application()
49
50
51 @app.on_event("startup")
52 async def initialize_logging_on_startup():
53 if config.DEBUG:
54 initialize_logging(logging.DEBUG)
55 else:
56 initialize_logging(logging.INFO)
57
58 disable_unwanted_loggers()
59
60
61 @app.on_event("startup")
62 @repeat_every(seconds=20, wait_first=True, logger=logging.getLogger())
63 async def update_deployment_status() -> None:
64 await receive_message_and_update_deployment(app)
65
66
67 if __name__ == "__main__":
68 uvicorn.run(app, host="0.0.0.0", port=8000)
69
[end of api_app/main.py]
[start of api_app/_version.py]
1 __version__ = "0.1.1"
2
[end of api_app/_version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/api_app/_version.py b/api_app/_version.py
--- a/api_app/_version.py
+++ b/api_app/_version.py
@@ -1 +1 @@
-__version__ = "0.1.1"
+__version__ = "0.1.3"
diff --git a/api_app/main.py b/api_app/main.py
--- a/api_app/main.py
+++ b/api_app/main.py
@@ -1,7 +1,8 @@
import logging
+import os
import uvicorn
-from fastapi import FastAPI
+from fastapi import FastAPI, Request
from fastapi.exceptions import RequestValidationError
from fastapi_utils.tasks import repeat_every
from starlette.exceptions import HTTPException
@@ -17,6 +18,16 @@
from services.logging import disable_unwanted_loggers, initialize_logging
from service_bus.deployment_status_update import receive_message_and_update_deployment
+# Opencensus Azure imports
+from opencensus.ext.azure.trace_exporter import AzureExporter
+from opencensus.trace.attributes_helper import COMMON_ATTRIBUTES
+from opencensus.trace.samplers import ProbabilitySampler
+from opencensus.trace.span import SpanKind
+from opencensus.trace.tracer import Tracer
+
+HTTP_URL = COMMON_ATTRIBUTES['HTTP_URL']
+HTTP_STATUS_CODE = COMMON_ATTRIBUTES['HTTP_STATUS_CODE']
+
def get_application() -> FastAPI:
application = FastAPI(
@@ -64,5 +75,19 @@
await receive_message_and_update_deployment(app)
[email protected]("http")
+async def add_process_time_header(request: Request, call_next):
+ tracer = Tracer(exporter=AzureExporter(connection_string=f'InstrumentationKey={os.getenv("APPINSIGHTS_INSTRUMENTATIONKEY")}'), sampler=ProbabilitySampler(1.0))
+ with tracer.span("main") as span:
+ span.span_kind = SpanKind.SERVER
+
+ response = await call_next(request)
+
+ tracer.add_attribute_to_current_span(attribute_key=HTTP_STATUS_CODE, attribute_value=response.status_code)
+ tracer.add_attribute_to_current_span(attribute_key=HTTP_URL, attribute_value=str(request.url))
+
+ return response
+
+
if __name__ == "__main__":
uvicorn.run(app, host="0.0.0.0", port=8000)
| {"golden_diff": "diff --git a/api_app/_version.py b/api_app/_version.py\n--- a/api_app/_version.py\n+++ b/api_app/_version.py\n@@ -1 +1 @@\n-__version__ = \"0.1.1\"\n+__version__ = \"0.1.3\"\ndiff --git a/api_app/main.py b/api_app/main.py\n--- a/api_app/main.py\n+++ b/api_app/main.py\n@@ -1,7 +1,8 @@\n import logging\n+import os\n import uvicorn\n \n-from fastapi import FastAPI\n+from fastapi import FastAPI, Request\n from fastapi.exceptions import RequestValidationError\n from fastapi_utils.tasks import repeat_every\n from starlette.exceptions import HTTPException\n@@ -17,6 +18,16 @@\n from services.logging import disable_unwanted_loggers, initialize_logging\n from service_bus.deployment_status_update import receive_message_and_update_deployment\n \n+# Opencensus Azure imports\n+from opencensus.ext.azure.trace_exporter import AzureExporter\n+from opencensus.trace.attributes_helper import COMMON_ATTRIBUTES\n+from opencensus.trace.samplers import ProbabilitySampler\n+from opencensus.trace.span import SpanKind\n+from opencensus.trace.tracer import Tracer\n+\n+HTTP_URL = COMMON_ATTRIBUTES['HTTP_URL']\n+HTTP_STATUS_CODE = COMMON_ATTRIBUTES['HTTP_STATUS_CODE']\n+\n \n def get_application() -> FastAPI:\n application = FastAPI(\n@@ -64,5 +75,19 @@\n await receive_message_and_update_deployment(app)\n \n \[email protected](\"http\")\n+async def add_process_time_header(request: Request, call_next):\n+ tracer = Tracer(exporter=AzureExporter(connection_string=f'InstrumentationKey={os.getenv(\"APPINSIGHTS_INSTRUMENTATIONKEY\")}'), sampler=ProbabilitySampler(1.0))\n+ with tracer.span(\"main\") as span:\n+ span.span_kind = SpanKind.SERVER\n+\n+ response = await call_next(request)\n+\n+ tracer.add_attribute_to_current_span(attribute_key=HTTP_STATUS_CODE, attribute_value=response.status_code)\n+ tracer.add_attribute_to_current_span(attribute_key=HTTP_URL, attribute_value=str(request.url))\n+\n+ return response\n+\n+\n if __name__ == \"__main__\":\n uvicorn.run(app, host=\"0.0.0.0\", port=8000)\n", "issue": "API app not reporting requests to AppInsights\n**Description**\r\nEnsure opencensus reports http requests to app insights.\n", "before_files": [{"content": "import logging\nimport uvicorn\n\nfrom fastapi import FastAPI\nfrom fastapi.exceptions import RequestValidationError\nfrom fastapi_utils.tasks import repeat_every\nfrom starlette.exceptions import HTTPException\nfrom starlette.middleware.errors import ServerErrorMiddleware\n\nfrom api.routes.api import router as api_router\nfrom api.routes.api import tags_metadata\nfrom api.errors.http_error import http_error_handler\nfrom api.errors.validation_error import http422_error_handler\nfrom api.errors.generic_error import generic_error_handler\nfrom core import config\nfrom core.events import create_start_app_handler, create_stop_app_handler\nfrom services.logging import disable_unwanted_loggers, initialize_logging\nfrom service_bus.deployment_status_update import receive_message_and_update_deployment\n\n\ndef get_application() -> FastAPI:\n application = FastAPI(\n title=config.PROJECT_NAME,\n debug=config.DEBUG,\n description=config.API_DESCRIPTION,\n version=config.VERSION,\n docs_url=\"/api/docs\",\n swagger_ui_oauth2_redirect_url=\"/api/docs/oauth2-redirect\",\n swagger_ui_init_oauth={\n \"usePkceWithAuthorizationCodeGrant\": True,\n \"clientId\": config.SWAGGER_UI_CLIENT_ID,\n \"scopes\": [\"openid\", \"offline_access\", f\"api://{config.API_CLIENT_ID}/Workspace.Read\", f\"api://{config.API_CLIENT_ID}/Workspace.Write\"]\n },\n 
openapi_tags=tags_metadata\n )\n\n application.add_event_handler(\"startup\", create_start_app_handler(application))\n application.add_event_handler(\"shutdown\", create_stop_app_handler(application))\n\n application.add_middleware(ServerErrorMiddleware, handler=generic_error_handler)\n application.add_exception_handler(HTTPException, http_error_handler)\n application.add_exception_handler(RequestValidationError, http422_error_handler)\n\n application.include_router(api_router, prefix=config.API_PREFIX)\n return application\n\n\napp = get_application()\n\n\[email protected]_event(\"startup\")\nasync def initialize_logging_on_startup():\n if config.DEBUG:\n initialize_logging(logging.DEBUG)\n else:\n initialize_logging(logging.INFO)\n\n disable_unwanted_loggers()\n\n\[email protected]_event(\"startup\")\n@repeat_every(seconds=20, wait_first=True, logger=logging.getLogger())\nasync def update_deployment_status() -> None:\n await receive_message_and_update_deployment(app)\n\n\nif __name__ == \"__main__\":\n uvicorn.run(app, host=\"0.0.0.0\", port=8000)\n", "path": "api_app/main.py"}, {"content": "__version__ = \"0.1.1\"\n", "path": "api_app/_version.py"}]} | 1,222 | 501 |
gh_patches_debug_24705 | rasdani/github-patches | git_diff | dotkom__onlineweb4-1247 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Users with many allergies or long text overflows their cell in the pdf
Example from prod can be seen on Immball 2015.
</issue>
<code>
[start of apps/events/pdf_generator.py]
1 # -*- coding: utf-8 -*-
2 from django.contrib.auth.decorators import login_required, user_passes_test
3
4 from pdfdocument.utils import pdf_response
5 from reportlab.platypus import TableStyle, Paragraph
6 from reportlab.lib import colors
7 from reportlab.lib.styles import getSampleStyleSheet
8
9 class EventPDF(object):
10
11 event = None
12 attendees = None
13 waiters = None
14 reservees = None
15 attendee_table_data = None
16 waiters_table_data = None
17 reservee_table_data = None
18 allergies_table_data = None
19
20 def __init__(self, event):
21 self.event = event
22 attendee_qs = event.attendance_event.attendees_qs
23 self.attendees = sorted(attendee_qs, key=lambda attendee: attendee.user.last_name)
24 self.waiters = event.attendance_event.waitlist_qs
25 self.reservees = event.attendance_event.reservees_qs
26 self.attendee_table_data = [(u'Navn', u'Klasse', u'Studie', u'Telefon'), ]
27 self.waiters_table_data = [(u'Navn', u'Klasse', u'Studie', u'Telefon'), ]
28 self.reservee_table_data = [(u'Navn', u'Notat'), ]
29 self.allergies_table_data = [(u'Allergisk mot', u'Navn'), ]
30
31 self.full_span_attendee_lines = []
32 self.full_span_waiters_lines = []
33 self.create_attendees_table_data()
34 self.create_waiters_table_data()
35 self.create_reservees_table_data()
36
37
38 # Create table data for attendees with a spot
39 def create_attendees_table_data(self):
40 i = 1
41
42 for attendee in self.attendees:
43 user = attendee.user
44 self.attendee_table_data.append((
45 create_body_text("%s, %s" % (user.last_name, user.first_name)),
46 user.year,
47 create_body_text(user.get_field_of_study_display()),
48 user.phone_number
49 ))
50
51 if attendee.note:
52 self.attendee_table_data.append((create_body_text(u'Notat for %s: ' % attendee.user.first_name + attendee.note),))
53 i += 1
54 self.full_span_attendee_lines.append(i)
55 if user.allergies:
56 self.allergies_table_data.append((user.allergies, user.get_full_name(),))
57
58 i += 1
59
60 # Create table data for attendees waiting for a spot
61 def create_waiters_table_data(self):
62 i = 1
63
64 for attendee in self.waiters:
65 user = attendee.user
66 self.waiters_table_data.append((
67 create_body_text("%s, %s" % (user.last_name, user.first_name)),
68 user.year,
69 create_body_text(user.get_field_of_study_display()),
70 user.phone_number
71 ))
72
73 if attendee.note:
74 self.waiters_table_data.append((create_body_text(u'Notat for %s: ' % attendee.user.first_name + attendee.note),))
75 i += 1
76 self.full_span_waiters_lines.append(i)
77 if user.allergies:
78 self.allergies_table_data.append((user.allergies, user.get_full_name(),))
79
80 i += 1
81
82 def create_reservees_table_data(self):
83 for reservee in self.reservees:
84 self.reservee_table_data.append((
85 create_body_text(reservee.name),
86 create_body_text(reservee.note)
87 ))
88 if reservee.allergies:
89 self.allergies_table_data.append((
90 create_body_text(reservee.allergies),
91 create_body_text(reservee.name),
92 ))
93 if reservee.allergies:
94 #self.allergies_table_data = self.allergies_table_data + [reservee.name + ' ' + reservee.allergies]
95 pass
96
97 def attendee_column_widths(self):
98 return (185, 40, 170, 75)
99
100 def reservee_column_widths(self):
101 return (185, 285)
102
103 def allergies_column_widths(self):
104 return (285, 185)
105
106 def render_pdf(self):
107 pdf, response = pdf_response(self.event.title + u" attendees")
108 pdf.init_report()
109
110 pdf.p(self.event.title, style=create_paragraph_style(font_size=18))
111 pdf.spacer(10)
112 pdf.p(self.event.event_start.strftime('%d. %B %Y'), create_paragraph_style(font_size=9))
113 pdf.spacer(height=25)
114
115 pdf.p(u"Påmeldte", style=create_paragraph_style(font_size=14))
116 pdf.spacer(height=20)
117 pdf.table(self.attendee_table_data, self.attendee_column_widths(), style=get_table_style(self.full_span_attendee_lines))
118 pdf.spacer(height=25)
119
120 if self.waiters.count() > 0:
121 pdf.p(u"Venteliste", style=create_paragraph_style(font_size=14))
122 pdf.spacer(height=20)
123 pdf.table(self.waiters_table_data, self.attendee_column_widths(), style=get_table_style(self.full_span_waiters_lines))
124 pdf.spacer(height=25)
125
126 if self.reservees and self.reservees.count() > 0:
127 pdf.p(u"Reservasjoner", style=create_paragraph_style(font_size=14))
128 pdf.spacer(height=20)
129 pdf.table(self.reservee_table_data, self.reservee_column_widths(), style=get_table_style())
130 pdf.spacer(height=25)
131
132 if self.allergies_table_data:
133 pdf.p(u"Allergier", style=create_paragraph_style(font_size=14))
134 pdf.spacer(height=20)
135 pdf.table(self.allergies_table_data, self.allergies_column_widths(), style=get_table_style())
136 pdf.spacer(height=25)
137
138 pdf.generate()
139 return response
140
141
142 # Table style for framed table with grids
143 def get_table_style(full_spans=None):
144 style = [
145 ('GRID',(0,0),(-1,-1),0.5,colors.grey),
146 ('BOX',(0,0),(-1,-1),1,colors.black),
147 ]
148 if full_spans:
149 for line in full_spans:
150 style.append(('SPAN',(0,line),(-1,line)))
151
152 return TableStyle(style)
153
154 # Normal paragraph
155 def create_paragraph_style(font_name='Helvetica', font_size=10, color=colors.black):
156 style = getSampleStyleSheet()['Normal']
157 style.fontSize = font_size
158 style.fontName = font_name
159 style.textColor = color
160
161 return style
162
163 # Paragraph with word-wrapping, useful for tables
164 def create_body_text(text, font_name='Helvetica', font_size=10, color=colors.black):
165 style = getSampleStyleSheet()['BodyText']
166 style.fontSize = font_size
167 style.fontName = font_name
168 style.textColor = color
169
170 return Paragraph(text, style=style)
171
[end of apps/events/pdf_generator.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/apps/events/pdf_generator.py b/apps/events/pdf_generator.py
--- a/apps/events/pdf_generator.py
+++ b/apps/events/pdf_generator.py
@@ -6,6 +6,8 @@
from reportlab.lib import colors
from reportlab.lib.styles import getSampleStyleSheet
+from textwrap import wrap
+
class EventPDF(object):
event = None
@@ -53,7 +55,9 @@
i += 1
self.full_span_attendee_lines.append(i)
if user.allergies:
- self.allergies_table_data.append((user.allergies, user.get_full_name(),))
+ # Breaks the line every 60th character
+ allergies = "\n".join(wrap(user.allergies, width=60))
+ self.allergies_table_data.append((allergies, user.get_full_name(),))
i += 1
@@ -75,7 +79,9 @@
i += 1
self.full_span_waiters_lines.append(i)
if user.allergies:
- self.allergies_table_data.append((user.allergies, user.get_full_name(),))
+ # Breaks the line every 60th character
+ allergies = "\n".join(wrap(user.allergies, width=60))
+ self.allergies_table_data.append((allergies, user.get_full_name(),))
i += 1
| {"golden_diff": "diff --git a/apps/events/pdf_generator.py b/apps/events/pdf_generator.py\n--- a/apps/events/pdf_generator.py\n+++ b/apps/events/pdf_generator.py\n@@ -6,6 +6,8 @@\n from reportlab.lib import colors\n from reportlab.lib.styles import getSampleStyleSheet\n \n+from textwrap import wrap\n+\n class EventPDF(object):\n \n event = None\n@@ -53,7 +55,9 @@\n i += 1\n self.full_span_attendee_lines.append(i) \n if user.allergies:\n- self.allergies_table_data.append((user.allergies, user.get_full_name(),))\n+ # Breaks the line every 60th character\n+ allergies = \"\\n\".join(wrap(user.allergies, width=60))\n+ self.allergies_table_data.append((allergies, user.get_full_name(),))\n \n i += 1\n \n@@ -75,7 +79,9 @@\n i += 1\n self.full_span_waiters_lines.append(i) \n if user.allergies:\n- self.allergies_table_data.append((user.allergies, user.get_full_name(),))\n+ # Breaks the line every 60th character\n+ allergies = \"\\n\".join(wrap(user.allergies, width=60))\n+ self.allergies_table_data.append((allergies, user.get_full_name(),))\n \n i += 1\n", "issue": "Users with many allergies or long text overflows their cell in the pdf\nExample from prod can be seen on Immball 2015.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom django.contrib.auth.decorators import login_required, user_passes_test\n\nfrom pdfdocument.utils import pdf_response\nfrom reportlab.platypus import TableStyle, Paragraph\nfrom reportlab.lib import colors\nfrom reportlab.lib.styles import getSampleStyleSheet\n\nclass EventPDF(object):\n\n event = None\n attendees = None\n waiters = None\n reservees = None\n attendee_table_data = None\n waiters_table_data = None\n reservee_table_data = None\n allergies_table_data = None\n\n def __init__(self, event):\n self.event = event\n attendee_qs = event.attendance_event.attendees_qs\n self.attendees = sorted(attendee_qs, key=lambda attendee: attendee.user.last_name)\n self.waiters = event.attendance_event.waitlist_qs\n self.reservees = event.attendance_event.reservees_qs \n self.attendee_table_data = [(u'Navn', u'Klasse', u'Studie', u'Telefon'), ]\n self.waiters_table_data = [(u'Navn', u'Klasse', u'Studie', u'Telefon'), ]\n self.reservee_table_data = [(u'Navn', u'Notat'), ]\n self.allergies_table_data = [(u'Allergisk mot', u'Navn'), ]\n\n self.full_span_attendee_lines = []\n self.full_span_waiters_lines = []\n self.create_attendees_table_data()\n self.create_waiters_table_data()\n self.create_reservees_table_data()\n \n\n # Create table data for attendees with a spot\n def create_attendees_table_data(self):\n i = 1\n\n for attendee in self.attendees:\n user = attendee.user\n self.attendee_table_data.append((\n create_body_text(\"%s, %s\" % (user.last_name, user.first_name)),\n user.year, \n create_body_text(user.get_field_of_study_display()),\n user.phone_number\n ))\n\n if attendee.note:\n self.attendee_table_data.append((create_body_text(u'Notat for %s: ' % attendee.user.first_name + attendee.note),))\n i += 1\n self.full_span_attendee_lines.append(i) \n if user.allergies:\n self.allergies_table_data.append((user.allergies, user.get_full_name(),))\n\n i += 1\n\n # Create table data for attendees waiting for a spot\n def create_waiters_table_data(self):\n i = 1\n\n for attendee in self.waiters:\n user = attendee.user\n self.waiters_table_data.append((\n create_body_text(\"%s, %s\" % (user.last_name, user.first_name)),\n user.year, \n create_body_text(user.get_field_of_study_display()),\n user.phone_number\n ))\n \n if attendee.note:\n 
self.waiters_table_data.append((create_body_text(u'Notat for %s: ' % attendee.user.first_name + attendee.note),))\n i += 1\n self.full_span_waiters_lines.append(i) \n if user.allergies:\n self.allergies_table_data.append((user.allergies, user.get_full_name(),))\n\n i += 1\n\n def create_reservees_table_data(self):\n for reservee in self.reservees:\n self.reservee_table_data.append((\n create_body_text(reservee.name), \n create_body_text(reservee.note)\n ))\n if reservee.allergies:\n self.allergies_table_data.append((\n create_body_text(reservee.allergies),\n create_body_text(reservee.name), \n ))\n if reservee.allergies:\n #self.allergies_table_data = self.allergies_table_data + [reservee.name + ' ' + reservee.allergies] \n pass\n\n def attendee_column_widths(self):\n return (185, 40, 170, 75)\n\n def reservee_column_widths(self):\n return (185, 285)\n\n def allergies_column_widths(self):\n return (285, 185)\n\n def render_pdf(self):\n pdf, response = pdf_response(self.event.title + u\" attendees\")\n pdf.init_report()\n\n pdf.p(self.event.title, style=create_paragraph_style(font_size=18))\n pdf.spacer(10)\n pdf.p(self.event.event_start.strftime('%d. %B %Y'), create_paragraph_style(font_size=9))\n pdf.spacer(height=25)\n\n pdf.p(u\"P\u00e5meldte\", style=create_paragraph_style(font_size=14))\n pdf.spacer(height=20)\n pdf.table(self.attendee_table_data, self.attendee_column_widths(), style=get_table_style(self.full_span_attendee_lines))\n pdf.spacer(height=25)\n \n if self.waiters.count() > 0:\n pdf.p(u\"Venteliste\", style=create_paragraph_style(font_size=14))\n pdf.spacer(height=20)\n pdf.table(self.waiters_table_data, self.attendee_column_widths(), style=get_table_style(self.full_span_waiters_lines))\n pdf.spacer(height=25)\n \n if self.reservees and self.reservees.count() > 0: \n pdf.p(u\"Reservasjoner\", style=create_paragraph_style(font_size=14))\n pdf.spacer(height=20)\n pdf.table(self.reservee_table_data, self.reservee_column_widths(), style=get_table_style())\n pdf.spacer(height=25)\n\n if self.allergies_table_data:\n pdf.p(u\"Allergier\", style=create_paragraph_style(font_size=14))\n pdf.spacer(height=20)\n pdf.table(self.allergies_table_data, self.allergies_column_widths(), style=get_table_style())\n pdf.spacer(height=25)\n\n pdf.generate()\n return response\n\n\n# Table style for framed table with grids\ndef get_table_style(full_spans=None):\n style = [\n ('GRID',(0,0),(-1,-1),0.5,colors.grey),\n ('BOX',(0,0),(-1,-1),1,colors.black),\n ]\n if full_spans:\n for line in full_spans:\n style.append(('SPAN',(0,line),(-1,line)))\n\n return TableStyle(style)\n\n# Normal paragraph\ndef create_paragraph_style(font_name='Helvetica', font_size=10, color=colors.black):\n style = getSampleStyleSheet()['Normal']\n style.fontSize = font_size\n style.fontName = font_name\n style.textColor = color\n\n return style\n\n# Paragraph with word-wrapping, useful for tables\ndef create_body_text(text, font_name='Helvetica', font_size=10, color=colors.black):\n style = getSampleStyleSheet()['BodyText']\n style.fontSize = font_size\n style.fontName = font_name\n style.textColor = color\n\n return Paragraph(text, style=style)\n", "path": "apps/events/pdf_generator.py"}]} | 2,504 | 315 |
gh_patches_debug_5150 | rasdani/github-patches | git_diff | helmholtz-analytics__heat-1095 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug]: convolve with distributed kernel on multiple GPUs
### What happened?
convolve does not work if the kernel is distributed when more than one GPU is available.
### Code snippet triggering the error
```python
import heat as ht
dis_signal = ht.arange(0, 16, split=0, device='gpu', dtype=ht.int)
dis_kernel_odd = ht.ones(3, split=0, dtype=ht.int, device='gpu')
conv = ht.convolve(dis_signal, dis_kernel_odd, mode='full')
```
### Error message or erroneous outcome
```shell
$ CUDA_VISIBLE_DEVICES=0,1,2,3 srun --ntasks=2 -l python test.py
1:Traceback (most recent call last):
1: File ".../test.py", line 7, in <module>
1: conv = ht.convolve(dis_signal, dis_kernel_odd, mode='full')
1: File ".../heat-venv_2023/lib/python3.10/site-packages/heat/core/signal.py", line 161, in convolve
1: local_signal_filtered = fc.conv1d(signal, t_v1)
1: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:1 and cuda:0! (when checking argument for argument weight in method wrapper__cudnn_convolution)
```
### Version
main (development branch)
### Python version
3.10
### PyTorch version
1.12
### MPI version
```shell
OpenMPI 4.1.4
```
</issue>
<code>
[start of heat/core/signal.py]
1 """Provides a collection of signal-processing operations"""
2
3 import torch
4 import numpy as np
5
6 from .communication import MPI
7 from .dndarray import DNDarray
8 from .types import promote_types
9 from .manipulations import pad, flip
10 from .factories import array, zeros
11 import torch.nn.functional as fc
12
13 __all__ = ["convolve"]
14
15
16 def convolve(a: DNDarray, v: DNDarray, mode: str = "full") -> DNDarray:
17 """
18 Returns the discrete, linear convolution of two one-dimensional `DNDarray`s or scalars.
19
20 Parameters
21 ----------
22 a : DNDarray or scalar
23 One-dimensional signal `DNDarray` of shape (N,) or scalar.
24 v : DNDarray or scalar
25 One-dimensional filter weight `DNDarray` of shape (M,) or scalar.
26 mode : str
27 Can be 'full', 'valid', or 'same'. Default is 'full'.
28 'full':
29 Returns the convolution at
30 each point of overlap, with an output shape of (N+M-1,). At
31 the end-points of the convolution, the signals do not overlap
32 completely, and boundary effects may be seen.
33 'same':
34 Mode 'same' returns output of length 'N'. Boundary
35 effects are still visible. This mode is not supported for
36 even-sized filter weights
37 'valid':
38 Mode 'valid' returns output of length 'N-M+1'. The
39 convolution product is only given for points where the signals
40 overlap completely. Values outside the signal boundary have no
41 effect.
42
43 Examples
44 --------
45 Note how the convolution operator flips the second array
46 before "sliding" the two across one another:
47
48 >>> a = ht.ones(10)
49 >>> v = ht.arange(3).astype(ht.float)
50 >>> ht.convolve(a, v, mode='full')
51 DNDarray([0., 1., 3., 3., 3., 3., 2.])
52 >>> ht.convolve(a, v, mode='same')
53 DNDarray([1., 3., 3., 3., 3.])
54 >>> ht.convolve(a, v, mode='valid')
55 DNDarray([3., 3., 3.])
56 >>> a = ht.ones(10, split = 0)
57 >>> v = ht.arange(3, split = 0).astype(ht.float)
58 >>> ht.convolve(a, v, mode='valid')
59 DNDarray([3., 3., 3., 3., 3., 3., 3., 3.])
60
61 [0/3] DNDarray([3., 3., 3.])
62 [1/3] DNDarray([3., 3., 3.])
63 [2/3] DNDarray([3., 3.])
64 >>> a = ht.ones(10, split = 0)
65 >>> v = ht.arange(3, split = 0)
66 >>> ht.convolve(a, v)
67 DNDarray([0., 1., 3., 3., 3., 3., 3., 3., 3., 3., 3., 2.], dtype=ht.float32, device=cpu:0, split=0)
68
69 [0/3] DNDarray([0., 1., 3., 3.])
70 [1/3] DNDarray([3., 3., 3., 3.])
71 [2/3] DNDarray([3., 3., 3., 2.])
72 """
73 if np.isscalar(a):
74 a = array([a])
75 if np.isscalar(v):
76 v = array([v])
77 if not isinstance(a, DNDarray):
78 try:
79 a = array(a)
80 except TypeError:
81 raise TypeError("non-supported type for signal: {}".format(type(a)))
82 if not isinstance(v, DNDarray):
83 try:
84 v = array(v)
85 except TypeError:
86 raise TypeError("non-supported type for filter: {}".format(type(v)))
87 promoted_type = promote_types(a.dtype, v.dtype)
88 a = a.astype(promoted_type)
89 v = v.astype(promoted_type)
90
91 if len(a.shape) != 1 or len(v.shape) != 1:
92 raise ValueError("Only 1-dimensional input DNDarrays are allowed")
93 if mode == "same" and v.shape[0] % 2 == 0:
94 raise ValueError("Mode 'same' cannot be used with even-sized kernel")
95 if not v.is_balanced():
96 raise ValueError("Only balanced kernel weights are allowed")
97
98 if v.shape[0] > a.shape[0]:
99 a, v = v, a
100
101 # compute halo size
102 halo_size = torch.max(v.lshape_map[:, 0]).item() // 2
103
104 # pad DNDarray with zeros according to mode
105 if mode == "full":
106 pad_size = v.shape[0] - 1
107 gshape = v.shape[0] + a.shape[0] - 1
108 elif mode == "same":
109 pad_size = v.shape[0] // 2
110 gshape = a.shape[0]
111 elif mode == "valid":
112 pad_size = 0
113 gshape = a.shape[0] - v.shape[0] + 1
114 else:
115 raise ValueError("Supported modes are 'full', 'valid', 'same', got {}".format(mode))
116
117 a = pad(a, pad_size, "constant", 0)
118
119 if a.is_distributed():
120 if (v.lshape_map[:, 0] > a.lshape_map[:, 0]).any():
121 raise ValueError(
122 "Local chunk of filter weight is larger than the local chunks of signal"
123 )
124 # fetch halos and store them in a.halo_next/a.halo_prev
125 a.get_halo(halo_size)
126 # apply halos to local array
127 signal = a.array_with_halos
128 else:
129 signal = a.larray
130
131 # flip filter for convolution as Pytorch conv1d computes correlations
132 v = flip(v, [0])
133 if v.larray.shape != v.lshape_map[0]:
134 # pads weights if input kernel is uneven
135 target = torch.zeros(v.lshape_map[0][0], dtype=v.larray.dtype, device=v.larray.device)
136 pad_size = v.lshape_map[0][0] - v.larray.shape[0]
137 target[pad_size:] = v.larray
138 weight = target
139 else:
140 weight = v.larray
141
142 t_v = weight # stores temporary weight
143
144 # make signal and filter weight 3D for Pytorch conv1d function
145 signal = signal.reshape(1, 1, signal.shape[0])
146 weight = weight.reshape(1, 1, weight.shape[0])
147
148 # cast to float if on GPU
149 if signal.is_cuda:
150 float_type = promote_types(signal.dtype, torch.float32).torch_type()
151 signal = signal.to(float_type)
152 weight = weight.to(float_type)
153 t_v = t_v.to(float_type)
154
155 if v.is_distributed():
156 size = v.comm.size
157
158 for r in range(size):
159 rec_v = v.comm.bcast(t_v, root=r)
160 t_v1 = rec_v.reshape(1, 1, rec_v.shape[0])
161 local_signal_filtered = fc.conv1d(signal, t_v1)
162 # unpack 3D result into 1D
163 local_signal_filtered = local_signal_filtered[0, 0, :]
164
165 if a.comm.rank != 0 and v.lshape_map[0][0] % 2 == 0:
166 local_signal_filtered = local_signal_filtered[1:]
167
168 # accumulate filtered signal on the fly
169 global_signal_filtered = array(
170 local_signal_filtered, is_split=0, device=a.device, comm=a.comm
171 )
172 if r == 0:
173 # initialize signal_filtered, starting point of slice
174 signal_filtered = zeros(
175 gshape, dtype=a.dtype, split=a.split, device=a.device, comm=a.comm
176 )
177 start_idx = 0
178
179 # accumulate relevant slice of filtered signal
180 # note, this is a binary operation between unevenly distributed dndarrays and will require communication, check out _operations.__binary_op()
181 signal_filtered += global_signal_filtered[start_idx : start_idx + gshape]
182 if r != size - 1:
183 start_idx += v.lshape_map[r + 1][0].item()
184 return signal_filtered
185
186 else:
187 # apply torch convolution operator
188 signal_filtered = fc.conv1d(signal, weight)
189
190 # unpack 3D result into 1D
191 signal_filtered = signal_filtered[0, 0, :]
192
193 # if kernel shape along split axis is even we need to get rid of duplicated values
194 if a.comm.rank != 0 and v.shape[0] % 2 == 0:
195 signal_filtered = signal_filtered[1:]
196
197 return DNDarray(
198 signal_filtered.contiguous(),
199 (gshape,),
200 signal_filtered.dtype,
201 a.split,
202 a.device,
203 a.comm,
204 balanced=False,
205 ).astype(a.dtype.torch_type())
206
[end of heat/core/signal.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/heat/core/signal.py b/heat/core/signal.py
--- a/heat/core/signal.py
+++ b/heat/core/signal.py
@@ -156,7 +156,8 @@
size = v.comm.size
for r in range(size):
- rec_v = v.comm.bcast(t_v, root=r)
+ rec_v = t_v.clone()
+ v.comm.Bcast(rec_v, root=r)
t_v1 = rec_v.reshape(1, 1, rec_v.shape[0])
local_signal_filtered = fc.conv1d(signal, t_v1)
# unpack 3D result into 1D
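The one-line change above swaps an object-style broadcast for a buffer-style one: `comm.bcast` ships a pickled copy of the root rank's tensor, which keeps the root's device and is the likely source of the cuda:0 versus cuda:1 mismatch in the traceback, while cloning `t_v` locally and filling it with `comm.Bcast` keeps the received kernel slice on the local rank's own device. A rough stand-alone sketch of the two styles, using mpi4py with NumPy buffers as a stand-in for heat's torch-based communicator:

```python
# Run with e.g.: mpirun -np 2 python bcast_vs_Bcast.py
import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

# Object broadcast: the root's object is pickled and rebuilt on every rank.
# A rebuilt CUDA tensor keeps the sender's device, which is the kind of
# mismatch the traceback above shows (cuda:0 weight vs cuda:1 signal).
weights = np.ones(3, dtype=np.float32) if rank == 0 else None
weights = comm.bcast(weights, root=0)

# Buffer broadcast: every rank first allocates its own receive buffer
# (the patch does this with rec_v = t_v.clone()), then Bcast fills it in
# place, so the data lands in memory the local rank already owns.
rec = np.ones(3, dtype=np.float32) if rank == 0 else np.empty(3, dtype=np.float32)
comm.Bcast(rec, root=0)

print(rank, weights, rec)
```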
| {"golden_diff": "diff --git a/heat/core/signal.py b/heat/core/signal.py\n--- a/heat/core/signal.py\n+++ b/heat/core/signal.py\n@@ -156,7 +156,8 @@\n size = v.comm.size\n \n for r in range(size):\n- rec_v = v.comm.bcast(t_v, root=r)\n+ rec_v = t_v.clone()\n+ v.comm.Bcast(rec_v, root=r)\n t_v1 = rec_v.reshape(1, 1, rec_v.shape[0])\n local_signal_filtered = fc.conv1d(signal, t_v1)\n # unpack 3D result into 1D\n", "issue": "[Bug]: convolve with distributed kernel on multiple GPUs\n### What happened?\r\n\r\nconvolve does not work if the kernel is distributed when more than one GPU is available.\r\n\r\n### Code snippet triggering the error\r\n\r\n```python\r\nimport heat as ht\r\n\r\ndis_signal = ht.arange(0, 16, split=0, device='gpu', dtype=ht.int)\r\ndis_kernel_odd = ht.ones(3, split=0, dtype=ht.int, device='gpu')\r\nconv = ht.convolve(dis_signal, dis_kernel_odd, mode='full')\r\n```\r\n\r\n\r\n### Error message or erroneous outcome\r\n\r\n```shell\r\n$ CUDA_VISIBLE_DEVICES=0,1,2,3 srun --ntasks=2 -l python test.py \r\n1:Traceback (most recent call last):\r\n1: File \".../test.py\", line 7, in <module>\r\n1: conv = ht.convolve(dis_signal, dis_kernel_odd, mode='full')\r\n1: File \".../heat-venv_2023/lib/python3.10/site-packages/heat/core/signal.py\", line 161, in convolve\r\n1: local_signal_filtered = fc.conv1d(signal, t_v1)\r\n1: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:1 and cuda:0! (when checking argument for argument weight in method wrapper__cudnn_convolution)\r\n```\r\n\r\n\r\n### Version\r\n\r\nmain (development branch)\r\n\r\n### Python version\r\n\r\n3.10\r\n\r\n### PyTorch version\r\n\r\n1.12\r\n\r\n### MPI version\r\n\r\n```shell\r\nOpenMPI 4.1.4\r\n```\r\n\n", "before_files": [{"content": "\"\"\"Provides a collection of signal-processing operations\"\"\"\n\nimport torch\nimport numpy as np\n\nfrom .communication import MPI\nfrom .dndarray import DNDarray\nfrom .types import promote_types\nfrom .manipulations import pad, flip\nfrom .factories import array, zeros\nimport torch.nn.functional as fc\n\n__all__ = [\"convolve\"]\n\n\ndef convolve(a: DNDarray, v: DNDarray, mode: str = \"full\") -> DNDarray:\n \"\"\"\n Returns the discrete, linear convolution of two one-dimensional `DNDarray`s or scalars.\n\n Parameters\n ----------\n a : DNDarray or scalar\n One-dimensional signal `DNDarray` of shape (N,) or scalar.\n v : DNDarray or scalar\n One-dimensional filter weight `DNDarray` of shape (M,) or scalar.\n mode : str\n Can be 'full', 'valid', or 'same'. Default is 'full'.\n 'full':\n Returns the convolution at\n each point of overlap, with an output shape of (N+M-1,). At\n the end-points of the convolution, the signals do not overlap\n completely, and boundary effects may be seen.\n 'same':\n Mode 'same' returns output of length 'N'. Boundary\n effects are still visible. This mode is not supported for\n even-sized filter weights\n 'valid':\n Mode 'valid' returns output of length 'N-M+1'. The\n convolution product is only given for points where the signals\n overlap completely. 
Values outside the signal boundary have no\n effect.\n\n Examples\n --------\n Note how the convolution operator flips the second array\n before \"sliding\" the two across one another:\n\n >>> a = ht.ones(10)\n >>> v = ht.arange(3).astype(ht.float)\n >>> ht.convolve(a, v, mode='full')\n DNDarray([0., 1., 3., 3., 3., 3., 2.])\n >>> ht.convolve(a, v, mode='same')\n DNDarray([1., 3., 3., 3., 3.])\n >>> ht.convolve(a, v, mode='valid')\n DNDarray([3., 3., 3.])\n >>> a = ht.ones(10, split = 0)\n >>> v = ht.arange(3, split = 0).astype(ht.float)\n >>> ht.convolve(a, v, mode='valid')\n DNDarray([3., 3., 3., 3., 3., 3., 3., 3.])\n\n [0/3] DNDarray([3., 3., 3.])\n [1/3] DNDarray([3., 3., 3.])\n [2/3] DNDarray([3., 3.])\n >>> a = ht.ones(10, split = 0)\n >>> v = ht.arange(3, split = 0)\n >>> ht.convolve(a, v)\n DNDarray([0., 1., 3., 3., 3., 3., 3., 3., 3., 3., 3., 2.], dtype=ht.float32, device=cpu:0, split=0)\n\n [0/3] DNDarray([0., 1., 3., 3.])\n [1/3] DNDarray([3., 3., 3., 3.])\n [2/3] DNDarray([3., 3., 3., 2.])\n \"\"\"\n if np.isscalar(a):\n a = array([a])\n if np.isscalar(v):\n v = array([v])\n if not isinstance(a, DNDarray):\n try:\n a = array(a)\n except TypeError:\n raise TypeError(\"non-supported type for signal: {}\".format(type(a)))\n if not isinstance(v, DNDarray):\n try:\n v = array(v)\n except TypeError:\n raise TypeError(\"non-supported type for filter: {}\".format(type(v)))\n promoted_type = promote_types(a.dtype, v.dtype)\n a = a.astype(promoted_type)\n v = v.astype(promoted_type)\n\n if len(a.shape) != 1 or len(v.shape) != 1:\n raise ValueError(\"Only 1-dimensional input DNDarrays are allowed\")\n if mode == \"same\" and v.shape[0] % 2 == 0:\n raise ValueError(\"Mode 'same' cannot be used with even-sized kernel\")\n if not v.is_balanced():\n raise ValueError(\"Only balanced kernel weights are allowed\")\n\n if v.shape[0] > a.shape[0]:\n a, v = v, a\n\n # compute halo size\n halo_size = torch.max(v.lshape_map[:, 0]).item() // 2\n\n # pad DNDarray with zeros according to mode\n if mode == \"full\":\n pad_size = v.shape[0] - 1\n gshape = v.shape[0] + a.shape[0] - 1\n elif mode == \"same\":\n pad_size = v.shape[0] // 2\n gshape = a.shape[0]\n elif mode == \"valid\":\n pad_size = 0\n gshape = a.shape[0] - v.shape[0] + 1\n else:\n raise ValueError(\"Supported modes are 'full', 'valid', 'same', got {}\".format(mode))\n\n a = pad(a, pad_size, \"constant\", 0)\n\n if a.is_distributed():\n if (v.lshape_map[:, 0] > a.lshape_map[:, 0]).any():\n raise ValueError(\n \"Local chunk of filter weight is larger than the local chunks of signal\"\n )\n # fetch halos and store them in a.halo_next/a.halo_prev\n a.get_halo(halo_size)\n # apply halos to local array\n signal = a.array_with_halos\n else:\n signal = a.larray\n\n # flip filter for convolution as Pytorch conv1d computes correlations\n v = flip(v, [0])\n if v.larray.shape != v.lshape_map[0]:\n # pads weights if input kernel is uneven\n target = torch.zeros(v.lshape_map[0][0], dtype=v.larray.dtype, device=v.larray.device)\n pad_size = v.lshape_map[0][0] - v.larray.shape[0]\n target[pad_size:] = v.larray\n weight = target\n else:\n weight = v.larray\n\n t_v = weight # stores temporary weight\n\n # make signal and filter weight 3D for Pytorch conv1d function\n signal = signal.reshape(1, 1, signal.shape[0])\n weight = weight.reshape(1, 1, weight.shape[0])\n\n # cast to float if on GPU\n if signal.is_cuda:\n float_type = promote_types(signal.dtype, torch.float32).torch_type()\n signal = signal.to(float_type)\n weight = weight.to(float_type)\n t_v = 
t_v.to(float_type)\n\n if v.is_distributed():\n size = v.comm.size\n\n for r in range(size):\n rec_v = v.comm.bcast(t_v, root=r)\n t_v1 = rec_v.reshape(1, 1, rec_v.shape[0])\n local_signal_filtered = fc.conv1d(signal, t_v1)\n # unpack 3D result into 1D\n local_signal_filtered = local_signal_filtered[0, 0, :]\n\n if a.comm.rank != 0 and v.lshape_map[0][0] % 2 == 0:\n local_signal_filtered = local_signal_filtered[1:]\n\n # accumulate filtered signal on the fly\n global_signal_filtered = array(\n local_signal_filtered, is_split=0, device=a.device, comm=a.comm\n )\n if r == 0:\n # initialize signal_filtered, starting point of slice\n signal_filtered = zeros(\n gshape, dtype=a.dtype, split=a.split, device=a.device, comm=a.comm\n )\n start_idx = 0\n\n # accumulate relevant slice of filtered signal\n # note, this is a binary operation between unevenly distributed dndarrays and will require communication, check out _operations.__binary_op()\n signal_filtered += global_signal_filtered[start_idx : start_idx + gshape]\n if r != size - 1:\n start_idx += v.lshape_map[r + 1][0].item()\n return signal_filtered\n\n else:\n # apply torch convolution operator\n signal_filtered = fc.conv1d(signal, weight)\n\n # unpack 3D result into 1D\n signal_filtered = signal_filtered[0, 0, :]\n\n # if kernel shape along split axis is even we need to get rid of duplicated values\n if a.comm.rank != 0 and v.shape[0] % 2 == 0:\n signal_filtered = signal_filtered[1:]\n\n return DNDarray(\n signal_filtered.contiguous(),\n (gshape,),\n signal_filtered.dtype,\n a.split,\n a.device,\n a.comm,\n balanced=False,\n ).astype(a.dtype.torch_type())\n", "path": "heat/core/signal.py"}]} | 3,430 | 147 |
gh_patches_debug_7155 | rasdani/github-patches | git_diff | MongoEngine__mongoengine-873 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
No module named 'django.utils.importlib' (Django dev)
In mongoengine/django/mongo_auth/models.py
See https://github.com/django/django/tree/master/django/utils
</issue>
<code>
[start of mongoengine/django/mongo_auth/models.py]
1 from django.conf import settings
2 from django.contrib.auth.hashers import make_password
3 from django.contrib.auth.models import UserManager
4 from django.core.exceptions import ImproperlyConfigured
5 from django.db import models
6 from django.utils.importlib import import_module
7 from django.utils.translation import ugettext_lazy as _
8
9
10 __all__ = (
11 'get_user_document',
12 )
13
14
15 MONGOENGINE_USER_DOCUMENT = getattr(
16 settings, 'MONGOENGINE_USER_DOCUMENT', 'mongoengine.django.auth.User')
17
18
19 def get_user_document():
20 """Get the user document class used for authentication.
21
22 This is the class defined in settings.MONGOENGINE_USER_DOCUMENT, which
23 defaults to `mongoengine.django.auth.User`.
24
25 """
26
27 name = MONGOENGINE_USER_DOCUMENT
28 dot = name.rindex('.')
29 module = import_module(name[:dot])
30 return getattr(module, name[dot + 1:])
31
32
33 class MongoUserManager(UserManager):
34 """A User manager wich allows the use of MongoEngine documents in Django.
35
36 To use the manager, you must tell django.contrib.auth to use MongoUser as
37 the user model. In you settings.py, you need:
38
39 INSTALLED_APPS = (
40 ...
41 'django.contrib.auth',
42 'mongoengine.django.mongo_auth',
43 ...
44 )
45 AUTH_USER_MODEL = 'mongo_auth.MongoUser'
46
47 Django will use the model object to access the custom Manager, which will
48 replace the original queryset with MongoEngine querysets.
49
50 By default, mongoengine.django.auth.User will be used to store users. You
51 can specify another document class in MONGOENGINE_USER_DOCUMENT in your
52 settings.py.
53
54 The User Document class has the same requirements as a standard custom user
55 model: https://docs.djangoproject.com/en/dev/topics/auth/customizing/
56
57 In particular, the User Document class must define USERNAME_FIELD and
58 REQUIRED_FIELDS.
59
60 `AUTH_USER_MODEL` has been added in Django 1.5.
61
62 """
63
64 def contribute_to_class(self, model, name):
65 super(MongoUserManager, self).contribute_to_class(model, name)
66 self.dj_model = self.model
67 self.model = get_user_document()
68
69 self.dj_model.USERNAME_FIELD = self.model.USERNAME_FIELD
70 username = models.CharField(_('username'), max_length=30, unique=True)
71 username.contribute_to_class(self.dj_model, self.dj_model.USERNAME_FIELD)
72
73 self.dj_model.REQUIRED_FIELDS = self.model.REQUIRED_FIELDS
74 for name in self.dj_model.REQUIRED_FIELDS:
75 field = models.CharField(_(name), max_length=30)
76 field.contribute_to_class(self.dj_model, name)
77
78
79 def get(self, *args, **kwargs):
80 try:
81 return self.get_query_set().get(*args, **kwargs)
82 except self.model.DoesNotExist:
83 # ModelBackend expects this exception
84 raise self.dj_model.DoesNotExist
85
86 @property
87 def db(self):
88 raise NotImplementedError
89
90 def get_empty_query_set(self):
91 return self.model.objects.none()
92
93 def get_query_set(self):
94 return self.model.objects
95
96
97 class MongoUser(models.Model):
98 """"Dummy user model for Django.
99
100 MongoUser is used to replace Django's UserManager with MongoUserManager.
101 The actual user document class is mongoengine.django.auth.User or any
102 other document class specified in MONGOENGINE_USER_DOCUMENT.
103
104 To get the user document class, use `get_user_document()`.
105
106 """
107
108 objects = MongoUserManager()
109
110 class Meta:
111 app_label = 'mongo_auth'
112
113 def set_password(self, password):
114 """Doesn't do anything, but works around the issue with Django 1.6."""
115 make_password(password)
116
[end of mongoengine/django/mongo_auth/models.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mongoengine/django/mongo_auth/models.py b/mongoengine/django/mongo_auth/models.py
--- a/mongoengine/django/mongo_auth/models.py
+++ b/mongoengine/django/mongo_auth/models.py
@@ -3,7 +3,11 @@
from django.contrib.auth.models import UserManager
from django.core.exceptions import ImproperlyConfigured
from django.db import models
-from django.utils.importlib import import_module
+try:
+ from django.utils.module_loading import import_module
+except ImportError:
+ """Handle older versions of Django"""
+ from django.utils.importlib import import_module
from django.utils.translation import ugettext_lazy as _
| {"golden_diff": "diff --git a/mongoengine/django/mongo_auth/models.py b/mongoengine/django/mongo_auth/models.py\n--- a/mongoengine/django/mongo_auth/models.py\n+++ b/mongoengine/django/mongo_auth/models.py\n@@ -3,7 +3,11 @@\n from django.contrib.auth.models import UserManager\n from django.core.exceptions import ImproperlyConfigured\n from django.db import models\n-from django.utils.importlib import import_module\n+try:\n+ from django.utils.module_loading import import_module\n+except ImportError:\n+ \"\"\"Handle older versions of Django\"\"\"\n+ from django.utils.importlib import import_module\n from django.utils.translation import ugettext_lazy as _\n", "issue": "No module named 'django.utils.importlib' (Django dev)\nIn mongoengine/django/mongo_auth/models.py\nSee https://github.com/django/django/tree/master/django/utils\n\nNo module named 'django.utils.importlib' (Django dev)\nIn mongoengine/django/mongo_auth/models.py\nSee https://github.com/django/django/tree/master/django/utils\n\n", "before_files": [{"content": "from django.conf import settings\nfrom django.contrib.auth.hashers import make_password\nfrom django.contrib.auth.models import UserManager\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.db import models\nfrom django.utils.importlib import import_module\nfrom django.utils.translation import ugettext_lazy as _\n\n\n__all__ = (\n 'get_user_document',\n)\n\n\nMONGOENGINE_USER_DOCUMENT = getattr(\n settings, 'MONGOENGINE_USER_DOCUMENT', 'mongoengine.django.auth.User')\n\n\ndef get_user_document():\n \"\"\"Get the user document class used for authentication.\n\n This is the class defined in settings.MONGOENGINE_USER_DOCUMENT, which\n defaults to `mongoengine.django.auth.User`.\n\n \"\"\"\n\n name = MONGOENGINE_USER_DOCUMENT\n dot = name.rindex('.')\n module = import_module(name[:dot])\n return getattr(module, name[dot + 1:])\n\n\nclass MongoUserManager(UserManager):\n \"\"\"A User manager wich allows the use of MongoEngine documents in Django.\n\n To use the manager, you must tell django.contrib.auth to use MongoUser as\n the user model. In you settings.py, you need:\n\n INSTALLED_APPS = (\n ...\n 'django.contrib.auth',\n 'mongoengine.django.mongo_auth',\n ...\n )\n AUTH_USER_MODEL = 'mongo_auth.MongoUser'\n\n Django will use the model object to access the custom Manager, which will\n replace the original queryset with MongoEngine querysets.\n\n By default, mongoengine.django.auth.User will be used to store users. 
You\n can specify another document class in MONGOENGINE_USER_DOCUMENT in your\n settings.py.\n\n The User Document class has the same requirements as a standard custom user\n model: https://docs.djangoproject.com/en/dev/topics/auth/customizing/\n\n In particular, the User Document class must define USERNAME_FIELD and\n REQUIRED_FIELDS.\n\n `AUTH_USER_MODEL` has been added in Django 1.5.\n\n \"\"\"\n\n def contribute_to_class(self, model, name):\n super(MongoUserManager, self).contribute_to_class(model, name)\n self.dj_model = self.model\n self.model = get_user_document()\n\n self.dj_model.USERNAME_FIELD = self.model.USERNAME_FIELD\n username = models.CharField(_('username'), max_length=30, unique=True)\n username.contribute_to_class(self.dj_model, self.dj_model.USERNAME_FIELD)\n\n self.dj_model.REQUIRED_FIELDS = self.model.REQUIRED_FIELDS\n for name in self.dj_model.REQUIRED_FIELDS:\n field = models.CharField(_(name), max_length=30)\n field.contribute_to_class(self.dj_model, name)\n\n\n def get(self, *args, **kwargs):\n try:\n return self.get_query_set().get(*args, **kwargs)\n except self.model.DoesNotExist:\n # ModelBackend expects this exception\n raise self.dj_model.DoesNotExist\n\n @property\n def db(self):\n raise NotImplementedError\n\n def get_empty_query_set(self):\n return self.model.objects.none()\n\n def get_query_set(self):\n return self.model.objects\n\n\nclass MongoUser(models.Model):\n \"\"\"\"Dummy user model for Django.\n\n MongoUser is used to replace Django's UserManager with MongoUserManager.\n The actual user document class is mongoengine.django.auth.User or any\n other document class specified in MONGOENGINE_USER_DOCUMENT.\n\n To get the user document class, use `get_user_document()`.\n\n \"\"\"\n\n objects = MongoUserManager()\n\n class Meta:\n app_label = 'mongo_auth'\n\n def set_password(self, password):\n \"\"\"Doesn't do anything, but works around the issue with Django 1.6.\"\"\"\n make_password(password)\n", "path": "mongoengine/django/mongo_auth/models.py"}]} | 1,670 | 145 |
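The accepted fix above wraps the moved import in try/except so both old and new Django layouts work. A sketch of the same pattern with one extra fallback to the standard library, which exposes the identical `import_module` everywhere; the dotted-path lookup mirrors `get_user_document`, with a stdlib class substituted so the snippet runs without Django or MongoEngine installed:

```python
try:
    from django.utils.module_loading import import_module    # path used by the patch
except ImportError:
    try:
        from django.utils.importlib import import_module     # older Django releases
    except ImportError:
        from importlib import import_module                   # standard library

# Same dotted-path lookup as get_user_document(); the project points this at
# 'mongoengine.django.auth.User', a stdlib class is used here for portability.
name = 'collections.OrderedDict'
module_name, _, class_name = name.rpartition('.')
print(getattr(import_module(module_name), class_name))
```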
gh_patches_debug_27750 | rasdani/github-patches | git_diff | fossasia__open-event-server-4399 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
User-Session: user/<id>/sessions returns all the sessions in system
**I'm submitting a ...** (check one with "x")
- [x] bug report
- [ ] feature request
- [ ] support request => Please do not submit support requests here, instead ask your query in our Gitter channel at https://gitter.im/fossasia/open-event-orga-server
The user API returns all the sessions in the system instead of sessions under the user
eg
URL
```
https://open-event-api.herokuapp.com/v1/users/5/sessions
```
Query Params:
```
include:event
sort:starts-at
```
@poush @shubham-padia @enigmaeth @magdalenesuo Please have a look at it
</issue>
<code>
[start of app/api/sessions.py]
1 from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship
2
3 from app.api.bootstrap import api
4 from app.api.events import Event
5 from app.api.helpers.db import safe_query
6 from app.api.helpers.mail import send_email_new_session, send_email_session_accept_reject
7 from app.api.helpers.notification import send_notif_new_session_organizer, send_notif_session_accept_reject
8 from app.api.helpers.permissions import current_identity
9 from app.api.helpers.query import event_query
10 from app.api.helpers.utilities import require_relationship
11 from app.api.schema.sessions import SessionSchema
12 from app.models import db
13 from app.models.microlocation import Microlocation
14 from app.models.session import Session
15 from app.models.session_type import SessionType
16 from app.models.speaker import Speaker
17 from app.models.track import Track
18 from app.settings import get_settings
19
20
21 class SessionListPost(ResourceList):
22 """
23 List Sessions
24 """
25 def before_post(self, args, kwargs, data):
26 require_relationship(['event'], data)
27 data['creator_id'] = current_identity.id
28
29 def after_create_object(self, session, data, view_kwargs):
30 if session.event.get_organizer():
31 event_name = session.event.name
32 organizer = session.event.get_organizer()
33 organizer_email = organizer.email
34 frontend_url = get_settings()['frontend_url']
35 link = "{}/events/{}/sessions/{}"\
36 .format(frontend_url, session.event_id, session.id)
37 send_email_new_session(organizer_email, event_name, link)
38 send_notif_new_session_organizer(organizer, event_name, link)
39
40 decorators = (api.has_permission('create_event'),)
41 schema = SessionSchema
42 data_layer = {'session': db.session,
43 'model': Session,
44 'methods': {'after_create_object': after_create_object
45 }}
46
47
48 class SessionList(ResourceList):
49 """
50 List Sessions
51 """
52
53 def query(self, view_kwargs):
54 query_ = self.session.query(Session)
55 if view_kwargs.get('track_id') is not None:
56 track = safe_query(self, Track, 'id', view_kwargs['track_id'], 'track_id')
57 query_ = query_.join(Track).filter(Track.id == track.id)
58 if view_kwargs.get('session_type_id') is not None:
59 session_type = safe_query(self, SessionType, 'id', view_kwargs['session_type_id'], 'session_type_id')
60 query_ = query_.join(SessionType).filter(SessionType.id == session_type.id)
61 if view_kwargs.get('microlocation_id') is not None:
62 microlocation = safe_query(self, Microlocation, 'id', view_kwargs['microlocation_id'], 'microlocation_id')
63 query_ = query_.join(Microlocation).filter(Microlocation.id == microlocation.id)
64 query_ = event_query(self, query_, view_kwargs)
65 if view_kwargs.get('speaker_id'):
66 speaker = safe_query(self, Speaker, 'id', view_kwargs['speaker_id'], 'speaker_id')
67 # session-speaker :: many-to-many relationship
68 query_ = Session.query.filter(Session.speakers.any(id=speaker.id))
69 return query_
70
71 view_kwargs = True
72 methods = ['GET']
73 schema = SessionSchema
74 data_layer = {'session': db.session,
75 'model': Session,
76 'methods': {
77 'query': query
78 }}
79
80
81 class SessionDetail(ResourceDetail):
82 """
83 Session detail by id
84 """
85 def before_get_object(self, view_kwargs):
86 if view_kwargs.get('event_identifier'):
87 event = safe_query(self, Event, 'identifier', view_kwargs['event_identifier'], 'identifier')
88 view_kwargs['event_id'] = event.id
89
90 def after_update_object(self, session, data, view_kwargs):
91 """ Send email if session accepted or rejected """
92 if 'state' in data and (session.state == 'accepted' or session.state == 'rejected'):
93 # Email for speaker
94 speakers = session.speakers
95 for speaker in speakers:
96 frontend_url = get_settings()['frontend_url']
97 link = "{}/events/{}/sessions/{}" \
98 .format(frontend_url, session.event_id, session.id)
99 send_email_session_accept_reject(speaker.email, session, link)
100 send_notif_session_accept_reject(speaker, session.title, session.state, link)
101
102 # Email for organizer
103 if session.event.get_organizer():
104 organizer = session.event.get_organizer()
105 organizer_email = organizer.email
106 frontend_url = get_settings()['frontend_url']
107 link = "{}/events/{}/sessions/{}" \
108 .format(frontend_url, session.event_id, session.id)
109 send_email_session_accept_reject(organizer_email, session,
110 link)
111 send_notif_session_accept_reject(organizer, session.title,
112 session.state, link)
113
114
115 decorators = (api.has_permission('is_speaker_for_session', methods="PATCH,DELETE"),)
116 schema = SessionSchema
117 data_layer = {'session': db.session,
118 'model': Session,
119 'methods': {'before_get_object': before_get_object,
120 'after_update_object': after_update_object}}
121
122
123 class SessionRelationshipRequired(ResourceRelationship):
124 """
125 Session Relationship
126 """
127 schema = SessionSchema
128 decorators = (api.has_permission('is_speaker_for_session', methods="PATCH,DELETE"),)
129 methods = ['GET', 'PATCH']
130 data_layer = {'session': db.session,
131 'model': Session}
132
133
134 class SessionRelationshipOptional(ResourceRelationship):
135 """
136 Session Relationship
137 """
138 schema = SessionSchema
139 decorators = (api.has_permission('is_speaker_for_session', methods="PATCH,DELETE"),)
140 data_layer = {'session': db.session,
141 'model': Session}
142
[end of app/api/sessions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/api/sessions.py b/app/api/sessions.py
--- a/app/api/sessions.py
+++ b/app/api/sessions.py
@@ -15,6 +15,7 @@
from app.models.session_type import SessionType
from app.models.speaker import Speaker
from app.models.track import Track
+from app.models.user import User
from app.settings import get_settings
@@ -61,11 +62,15 @@
if view_kwargs.get('microlocation_id') is not None:
microlocation = safe_query(self, Microlocation, 'id', view_kwargs['microlocation_id'], 'microlocation_id')
query_ = query_.join(Microlocation).filter(Microlocation.id == microlocation.id)
+ if view_kwargs.get('user_id') is not None:
+ user = safe_query(self, User, 'id', view_kwargs['user_id'], 'user_id')
+ query_ = query_.join(User).filter(User.id == user.id)
query_ = event_query(self, query_, view_kwargs)
if view_kwargs.get('speaker_id'):
speaker = safe_query(self, Speaker, 'id', view_kwargs['speaker_id'], 'speaker_id')
# session-speaker :: many-to-many relationship
query_ = Session.query.filter(Session.speakers.any(id=speaker.id))
+
return query_
view_kwargs = True
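The added `user_id` branch is what keeps `/users/<id>/sessions` from falling through to the unfiltered `Session` query. A self-contained SQLAlchemy 1.4+ sketch of that filter-per-view_kwargs pattern; the model and column names here are simplified stand-ins rather than the project's real schema:

```python
from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
from sqlalchemy.orm import Session as DBSession, declarative_base, relationship

Base = declarative_base()

class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)

class TalkSession(Base):                      # stand-in for the Session model
    __tablename__ = 'sessions'
    id = Column(Integer, primary_key=True)
    title = Column(String)
    creator_id = Column(Integer, ForeignKey('users.id'))
    creator = relationship(User)

def sessions_query(db, view_kwargs):
    """Only narrow the base query when the route supplied a user_id."""
    query_ = db.query(TalkSession)
    if view_kwargs.get('user_id') is not None:        # the branch the patch adds
        query_ = query_.join(User).filter(User.id == view_kwargs['user_id'])
    return query_

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
with DBSession(engine) as db:
    db.add_all([User(id=5), User(id=6),
                TalkSession(title='intro', creator_id=5),
                TalkSession(title='deep dive', creator_id=6)])
    db.commit()
    print([s.title for s in sessions_query(db, {'user_id': 5})])   # ['intro']
    print([s.title for s in sessions_query(db, {})])                # both titles
```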
| {"golden_diff": "diff --git a/app/api/sessions.py b/app/api/sessions.py\n--- a/app/api/sessions.py\n+++ b/app/api/sessions.py\n@@ -15,6 +15,7 @@\n from app.models.session_type import SessionType\n from app.models.speaker import Speaker\n from app.models.track import Track\n+from app.models.user import User\n from app.settings import get_settings\n \n \n@@ -61,11 +62,15 @@\n if view_kwargs.get('microlocation_id') is not None:\n microlocation = safe_query(self, Microlocation, 'id', view_kwargs['microlocation_id'], 'microlocation_id')\n query_ = query_.join(Microlocation).filter(Microlocation.id == microlocation.id)\n+ if view_kwargs.get('user_id') is not None:\n+ user = safe_query(self, User, 'id', view_kwargs['user_id'], 'user_id')\n+ query_ = query_.join(User).filter(User.id == user.id)\n query_ = event_query(self, query_, view_kwargs)\n if view_kwargs.get('speaker_id'):\n speaker = safe_query(self, Speaker, 'id', view_kwargs['speaker_id'], 'speaker_id')\n # session-speaker :: many-to-many relationship\n query_ = Session.query.filter(Session.speakers.any(id=speaker.id))\n+\n return query_\n \n view_kwargs = True\n", "issue": "User-Session: user/<id>/sessions returns all the sessions in system\n**I'm submitting a ...** (check one with \"x\")\r\n- [x] bug report\r\n- [ ] feature request\r\n- [ ] support request => Please do not submit support requests here, instead ask your query in out Gitter channel at https://gitter.im/fossasia/open-event-orga-server\r\n\r\nThe user API returns all the sessions in the system instead of sessions under the user\r\neg\r\nURL\r\n```\r\nhttps://open-event-api.herokuapp.com/v1/users/5/sessions\r\n```\r\n\r\nQuery Params:\r\n```\r\ninclude:event\r\nsort:starts-at\r\n```\r\n\r\n@poush @shubham-padia @enigmaeth @magdalenesuo Please have a look at it\n", "before_files": [{"content": "from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\n\nfrom app.api.bootstrap import api\nfrom app.api.events import Event\nfrom app.api.helpers.db import safe_query\nfrom app.api.helpers.mail import send_email_new_session, send_email_session_accept_reject\nfrom app.api.helpers.notification import send_notif_new_session_organizer, send_notif_session_accept_reject\nfrom app.api.helpers.permissions import current_identity\nfrom app.api.helpers.query import event_query\nfrom app.api.helpers.utilities import require_relationship\nfrom app.api.schema.sessions import SessionSchema\nfrom app.models import db\nfrom app.models.microlocation import Microlocation\nfrom app.models.session import Session\nfrom app.models.session_type import SessionType\nfrom app.models.speaker import Speaker\nfrom app.models.track import Track\nfrom app.settings import get_settings\n\n\nclass SessionListPost(ResourceList):\n \"\"\"\n List Sessions\n \"\"\"\n def before_post(self, args, kwargs, data):\n require_relationship(['event'], data)\n data['creator_id'] = current_identity.id\n\n def after_create_object(self, session, data, view_kwargs):\n if session.event.get_organizer():\n event_name = session.event.name\n organizer = session.event.get_organizer()\n organizer_email = organizer.email\n frontend_url = get_settings()['frontend_url']\n link = \"{}/events/{}/sessions/{}\"\\\n .format(frontend_url, session.event_id, session.id)\n send_email_new_session(organizer_email, event_name, link)\n send_notif_new_session_organizer(organizer, event_name, link)\n\n decorators = (api.has_permission('create_event'),)\n schema = SessionSchema\n data_layer = {'session': db.session,\n 'model': 
Session,\n 'methods': {'after_create_object': after_create_object\n }}\n\n\nclass SessionList(ResourceList):\n \"\"\"\n List Sessions\n \"\"\"\n\n def query(self, view_kwargs):\n query_ = self.session.query(Session)\n if view_kwargs.get('track_id') is not None:\n track = safe_query(self, Track, 'id', view_kwargs['track_id'], 'track_id')\n query_ = query_.join(Track).filter(Track.id == track.id)\n if view_kwargs.get('session_type_id') is not None:\n session_type = safe_query(self, SessionType, 'id', view_kwargs['session_type_id'], 'session_type_id')\n query_ = query_.join(SessionType).filter(SessionType.id == session_type.id)\n if view_kwargs.get('microlocation_id') is not None:\n microlocation = safe_query(self, Microlocation, 'id', view_kwargs['microlocation_id'], 'microlocation_id')\n query_ = query_.join(Microlocation).filter(Microlocation.id == microlocation.id)\n query_ = event_query(self, query_, view_kwargs)\n if view_kwargs.get('speaker_id'):\n speaker = safe_query(self, Speaker, 'id', view_kwargs['speaker_id'], 'speaker_id')\n # session-speaker :: many-to-many relationship\n query_ = Session.query.filter(Session.speakers.any(id=speaker.id))\n return query_\n\n view_kwargs = True\n methods = ['GET']\n schema = SessionSchema\n data_layer = {'session': db.session,\n 'model': Session,\n 'methods': {\n 'query': query\n }}\n\n\nclass SessionDetail(ResourceDetail):\n \"\"\"\n Session detail by id\n \"\"\"\n def before_get_object(self, view_kwargs):\n if view_kwargs.get('event_identifier'):\n event = safe_query(self, Event, 'identifier', view_kwargs['event_identifier'], 'identifier')\n view_kwargs['event_id'] = event.id\n\n def after_update_object(self, session, data, view_kwargs):\n \"\"\" Send email if session accepted or rejected \"\"\"\n if 'state' in data and (session.state == 'accepted' or session.state == 'rejected'):\n # Email for speaker\n speakers = session.speakers\n for speaker in speakers:\n frontend_url = get_settings()['frontend_url']\n link = \"{}/events/{}/sessions/{}\" \\\n .format(frontend_url, session.event_id, session.id)\n send_email_session_accept_reject(speaker.email, session, link)\n send_notif_session_accept_reject(speaker, session.title, session.state, link)\n\n # Email for organizer\n if session.event.get_organizer():\n organizer = session.event.get_organizer()\n organizer_email = organizer.email\n frontend_url = get_settings()['frontend_url']\n link = \"{}/events/{}/sessions/{}\" \\\n .format(frontend_url, session.event_id, session.id)\n send_email_session_accept_reject(organizer_email, session,\n link)\n send_notif_session_accept_reject(organizer, session.title,\n session.state, link)\n\n\n decorators = (api.has_permission('is_speaker_for_session', methods=\"PATCH,DELETE\"),)\n schema = SessionSchema\n data_layer = {'session': db.session,\n 'model': Session,\n 'methods': {'before_get_object': before_get_object,\n 'after_update_object': after_update_object}}\n\n\nclass SessionRelationshipRequired(ResourceRelationship):\n \"\"\"\n Session Relationship\n \"\"\"\n schema = SessionSchema\n decorators = (api.has_permission('is_speaker_for_session', methods=\"PATCH,DELETE\"),)\n methods = ['GET', 'PATCH']\n data_layer = {'session': db.session,\n 'model': Session}\n\n\nclass SessionRelationshipOptional(ResourceRelationship):\n \"\"\"\n Session Relationship\n \"\"\"\n schema = SessionSchema\n decorators = (api.has_permission('is_speaker_for_session', methods=\"PATCH,DELETE\"),)\n data_layer = {'session': db.session,\n 'model': Session}\n", "path": 
"app/api/sessions.py"}]} | 2,241 | 297 |
gh_patches_debug_13494 | rasdani/github-patches | git_diff | pymedusa__Medusa-3657 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Post-processor crashes
**Tried submitting this log through Medusa, but that feature doesn't work** (someone else already submitted that issue)
**Branch/Commit:** commit b16e0e5eb4ded727eebd2ab2cd9683ab597662f5 (HEAD -> master, origin/master, origin/HEAD)
**OS:** FreeNAS 11.0-U4
**What you did:** restarted Medusa, waited 10 minutes for the post-processor to kick in
**What happened:** exception generated; see below
**What you expected:** downloads should've been sorted into their destination directories
**Logs:**
```
2017-11-03 08:28:35 ERROR POSTPROCESSOR :: [b16e0e5] Exception generated: 'ascii' codec can't encode character u'\u2019' in position 37: ordinal not in range(128)
Traceback (most recent call last):
File "/usr/local/share/medusa/medusa/scheduler.py", line 93, in run
self.action.run(self.force)
File "/usr/local/share/medusa/medusa/auto_post_processor.py", line 57, in run
process_tv.ProcessResult(app.TV_DOWNLOAD_DIR, app.PROCESS_METHOD).process(force=force)
File "/usr/local/share/medusa/medusa/process_tv.py", line 162, in process
ignore_subs=ignore_subs)
File "/usr/local/share/medusa/medusa/process_tv.py", line 334, in process_files
self.process_media(path, self.video_files, force, is_priority, ignore_subs)
File "/usr/local/share/medusa/medusa/process_tv.py", line 535, in process_media
self.result = processor.process()
File "/usr/local/share/medusa/medusa/post_processor.py", line 1093, in process
self._delete(cur_ep.location, associated_files=True)
File "/usr/local/share/medusa/medusa/post_processor.py", line 330, in _delete
file_list += self.list_associated_files(file_list[0], subfolders=True)
File "/usr/local/share/medusa/medusa/post_processor.py", line 190, in list_associated_files
processed_names += filter(None, (self._rar_basename(file_path, files),))
File "/usr/local/share/medusa/medusa/post_processor.py", line 300, in _rar_basename
for rar in rars:
File "/usr/local/share/medusa/medusa/post_processor.py", line 298, in <genexpr>
rars = (x for x in files if rarfile.is_rarfile(x))
File "/usr/local/share/medusa/ext/rarfile.py", line 404, in is_rarfile
return _get_rar_version(xfile) > 0
File "/usr/local/share/medusa/ext/rarfile.py", line 389, in _get_rar_version
with XFile(xfile) as fd:
File "/usr/local/share/medusa/ext/rarfile.py", line 2401, in __init__
self._fd = open(xfile, 'rb', bufsize)
UnicodeEncodeError: 'ascii' codec can't encode character u'\u2019' in position 37: ordinal not in range(128)
```
</issue>
<code>
[start of medusa/init/filesystem.py]
1 # coding=utf-8
2 """Replace core filesystem functions."""
3
4 import glob
5 import io
6 import os
7 import shutil
8 import sys
9 import tarfile
10
11 import tempfile # noqa # pylint: disable=unused-import
12 import certifi
13
14 from six import binary_type, text_type
15
16
17 fs_encoding = sys.getfilesystemencoding()
18
19
20 def encode(value):
21 """Encode to bytes."""
22 return value.encode('utf-8' if os.name != 'nt' else fs_encoding)
23
24
25 def decode(value):
26 """Decode to unicode."""
27 # on windows the returned info from fs operations needs to be decoded using fs encoding
28 return text_type(value, 'utf-8' if os.name != 'nt' else fs_encoding)
29
30
31 def _handle_input(arg):
32 """Encode argument to utf-8 or fs encoding."""
33 # on windows the input params for fs operations needs to be encoded using fs encoding
34 return encode(arg) if isinstance(arg, text_type) else arg
35
36
37 def _handle_output_u(result):
38 """Convert result to unicode."""
39 if not result:
40 return result
41
42 if isinstance(result, binary_type):
43 return decode(result)
44
45 if isinstance(result, list) or isinstance(result, tuple):
46 return map(_handle_output_u, result)
47
48 if isinstance(result, dict):
49 for k, v in result.items():
50 result[k] = _handle_output_u(v)
51 return result
52
53 return result
54
55
56 def _handle_output_b(result):
57 """Convert result to binary."""
58 if not result:
59 return result
60
61 if isinstance(result, text_type):
62 return encode(result)
63
64 if isinstance(result, list) or isinstance(result, tuple):
65 return map(_handle_output_b, result)
66
67 if isinstance(result, dict):
68 for k, v in result.items():
69 result[k] = _handle_output_b(v)
70 return result
71
72 return result
73
74
75 def _varargs(*args):
76 """Encode var arguments to utf-8 or fs encoding."""
77 return [_handle_input(arg) for arg in args]
78
79
80 def _varkwargs(**kwargs):
81 """Encode var keyword arguments to utf-8."""
82 return {k: _handle_input(arg) for k, arg in kwargs.items()}
83
84
85 def make_closure(f, handle_arg=None, handle_output=None):
86 """Apply an input handler and output handler to a function.
87
88 Used to ensure UTF-8 encoding at input and output.
89 """
90 return patch_output(patch_input(f, handle_arg), handle_output)
91
92
93 def patch_input(f, handle_arg=None):
94 """Patch all args and kwargs of function f.
95
96 If handle_arg is None, just return the original function.
97 """
98 def patched_input(*args, **kwargs):
99 return f(*[handle_arg(arg) for arg in args], **{k: handle_arg(arg) for k, arg in kwargs.items()})
100 return patched_input if callable(handle_arg) else f
101
102
103 def patch_output(f, handle_output=None):
104 """Patch the output of function f with the handle_output function.
105
106 If handle_output is None, just return the original function.
107 """
108 def patched_output(*args, **kwargs):
109 return handle_output(f(*args, **kwargs))
110 return patched_output if callable(handle_output) else f
111
112
113 def initialize():
114 """Replace original functions if the fs encoding is not utf-8."""
115 if hasattr(sys, '_called_from_test'):
116 return
117
118 affected_functions = {
119 certifi: ['where', 'old_where'],
120 glob: ['glob'],
121 io: ['open'],
122 os: ['access', 'chdir', 'listdir', 'makedirs', 'mkdir', 'remove',
123 'rename', 'renames', 'rmdir', 'stat', 'unlink', 'utime', 'walk'],
124 os.path: ['abspath', 'basename', 'dirname', 'exists', 'getctime', 'getmtime', 'getsize',
125 'isabs', 'isdir', 'isfile', 'islink', 'join', 'normcase', 'normpath', 'realpath', 'relpath',
126 'split', 'splitext'],
127 shutil: ['copyfile', 'copymode', 'move', 'rmtree'],
128 tarfile: ['is_tarfile'],
129 }
130
131 # pyOpenSSL 0.14-1 bug: it can't handle unicode input.
132 # pyOpenSSL fix -> https://github.com/pyca/pyopenssl/pull/209
133 # Our bug: https://github.com/pymedusa/Medusa/issues/1422
134 handle_output_map = {
135 certifi: _handle_output_b
136 }
137
138 if os.name != 'nt':
139 affected_functions[os].extend(['chmod', 'chown', 'link', 'statvfs', 'symlink'])
140
141 if not fs_encoding or fs_encoding.lower() not in ('utf-8', 'mbcs'):
142 handle_input = _handle_input
143 else:
144 handle_input = None
145
146 for k, v in affected_functions.items():
147 handle_output = handle_output_map.get(k, _handle_output_u)
148 for f in v:
149 setattr(k, f, make_closure(getattr(k, f), handle_input, handle_output))
150
[end of medusa/init/filesystem.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/medusa/init/filesystem.py b/medusa/init/filesystem.py
--- a/medusa/init/filesystem.py
+++ b/medusa/init/filesystem.py
@@ -7,10 +7,12 @@
import shutil
import sys
import tarfile
-
import tempfile # noqa # pylint: disable=unused-import
+
import certifi
+import rarfile
+
from six import binary_type, text_type
@@ -126,6 +128,7 @@
'split', 'splitext'],
shutil: ['copyfile', 'copymode', 'move', 'rmtree'],
tarfile: ['is_tarfile'],
+ rarfile: ['is_rarfile'],
}
# pyOpenSSL 0.14-1 bug: it can't handle unicode input.
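Listing `rarfile` under `affected_functions` routes `is_rarfile` through the same input-encoding closure as the os, io and shutil calls, so the path with the curly apostrophe from the traceback is already bytes when rarfile opens it. A simplified stand-alone sketch of that input-wrapping idea (the real module also decodes outputs and only activates when the filesystem encoding is not UTF-8; six is assumed available, as it already is for the patched module):

```python
import os
import sys

from six import text_type

FS_ENCODING = sys.getfilesystemencoding()

def _encode_arg(arg):
    # Mirror the module's encode(): UTF-8 on POSIX, the fs encoding on Windows.
    if isinstance(arg, text_type):
        return arg.encode('utf-8' if os.name != 'nt' else FS_ENCODING)
    return arg

def wrap_input(func):
    """Return func with every positional and keyword argument encoded first."""
    def wrapped(*args, **kwargs):
        args = [_encode_arg(a) for a in args]
        kwargs = {k: _encode_arg(v) for k, v in kwargs.items()}
        return func(*args, **kwargs)
    return wrapped

# The added mapping makes initialize() do roughly:
#   rarfile.is_rarfile = wrap_input(rarfile.is_rarfile)
# so a path such as u"It\u2019s.rar" is bytes before rarfile calls open(),
# and the Python 2 ASCII default codec never sees the curly apostrophe.
```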
| {"golden_diff": "diff --git a/medusa/init/filesystem.py b/medusa/init/filesystem.py\n--- a/medusa/init/filesystem.py\n+++ b/medusa/init/filesystem.py\n@@ -7,10 +7,12 @@\n import shutil\n import sys\n import tarfile\n-\n import tempfile # noqa # pylint: disable=unused-import\n+\n import certifi\n \n+import rarfile\n+\n from six import binary_type, text_type\n \n \n@@ -126,6 +128,7 @@\n 'split', 'splitext'],\n shutil: ['copyfile', 'copymode', 'move', 'rmtree'],\n tarfile: ['is_tarfile'],\n+ rarfile: ['is_rarfile'],\n }\n \n # pyOpenSSL 0.14-1 bug: it can't handle unicode input.\n", "issue": "Post-processor crashes\n**Tried submitting this log through Medusa, but that feature doesn't work** (someone else already submitted that issue)\r\n\r\n**Branch/Commit:** commit b16e0e5eb4ded727eebd2ab2cd9683ab597662f5 (HEAD -> master, origin/master, origin/HEAD)\r\n**OS:** FreeNAS 11.0-U4\r\n**What you did:** restarted Medusa, waited 10 minutes for the post-processor to kick in\r\n**What happened:** exception generated; see below\r\n**What you expected:** downloads should've been sorted into their destination directories\r\n**Logs:**\r\n```\r\n2017-11-03 08:28:35 ERROR POSTPROCESSOR :: [b16e0e5] Exception generated: 'ascii' codec can't encode character u'\\u2019' in position 37: ordinal not in range(128)\r\nTraceback (most recent call last):\r\n File \"/usr/local/share/medusa/medusa/scheduler.py\", line 93, in run\r\n self.action.run(self.force)\r\n File \"/usr/local/share/medusa/medusa/auto_post_processor.py\", line 57, in run\r\n process_tv.ProcessResult(app.TV_DOWNLOAD_DIR, app.PROCESS_METHOD).process(force=force)\r\n File \"/usr/local/share/medusa/medusa/process_tv.py\", line 162, in process\r\n ignore_subs=ignore_subs)\r\n File \"/usr/local/share/medusa/medusa/process_tv.py\", line 334, in process_files\r\n self.process_media(path, self.video_files, force, is_priority, ignore_subs)\r\n File \"/usr/local/share/medusa/medusa/process_tv.py\", line 535, in process_media\r\n self.result = processor.process()\r\n File \"/usr/local/share/medusa/medusa/post_processor.py\", line 1093, in process\r\n self._delete(cur_ep.location, associated_files=True)\r\n File \"/usr/local/share/medusa/medusa/post_processor.py\", line 330, in _delete\r\n file_list += self.list_associated_files(file_list[0], subfolders=True)\r\n File \"/usr/local/share/medusa/medusa/post_processor.py\", line 190, in list_associated_files\r\n processed_names += filter(None, (self._rar_basename(file_path, files),))\r\n File \"/usr/local/share/medusa/medusa/post_processor.py\", line 300, in _rar_basename\r\n for rar in rars:\r\n File \"/usr/local/share/medusa/medusa/post_processor.py\", line 298, in <genexpr>\r\n rars = (x for x in files if rarfile.is_rarfile(x))\r\n File \"/usr/local/share/medusa/ext/rarfile.py\", line 404, in is_rarfile\r\n return _get_rar_version(xfile) > 0\r\n File \"/usr/local/share/medusa/ext/rarfile.py\", line 389, in _get_rar_version\r\n with XFile(xfile) as fd:\r\n File \"/usr/local/share/medusa/ext/rarfile.py\", line 2401, in __init__\r\n self._fd = open(xfile, 'rb', bufsize)\r\nUnicodeEncodeError: 'ascii' codec can't encode character u'\\u2019' in position 37: ordinal not in range(128)\r\n```\r\n\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"Replace core filesystem functions.\"\"\"\n\nimport glob\nimport io\nimport os\nimport shutil\nimport sys\nimport tarfile\n\nimport tempfile # noqa # pylint: disable=unused-import\nimport certifi\n\nfrom six import binary_type, text_type\n\n\nfs_encoding = 
sys.getfilesystemencoding()\n\n\ndef encode(value):\n \"\"\"Encode to bytes.\"\"\"\n return value.encode('utf-8' if os.name != 'nt' else fs_encoding)\n\n\ndef decode(value):\n \"\"\"Decode to unicode.\"\"\"\n # on windows the returned info from fs operations needs to be decoded using fs encoding\n return text_type(value, 'utf-8' if os.name != 'nt' else fs_encoding)\n\n\ndef _handle_input(arg):\n \"\"\"Encode argument to utf-8 or fs encoding.\"\"\"\n # on windows the input params for fs operations needs to be encoded using fs encoding\n return encode(arg) if isinstance(arg, text_type) else arg\n\n\ndef _handle_output_u(result):\n \"\"\"Convert result to unicode.\"\"\"\n if not result:\n return result\n\n if isinstance(result, binary_type):\n return decode(result)\n\n if isinstance(result, list) or isinstance(result, tuple):\n return map(_handle_output_u, result)\n\n if isinstance(result, dict):\n for k, v in result.items():\n result[k] = _handle_output_u(v)\n return result\n\n return result\n\n\ndef _handle_output_b(result):\n \"\"\"Convert result to binary.\"\"\"\n if not result:\n return result\n\n if isinstance(result, text_type):\n return encode(result)\n\n if isinstance(result, list) or isinstance(result, tuple):\n return map(_handle_output_b, result)\n\n if isinstance(result, dict):\n for k, v in result.items():\n result[k] = _handle_output_b(v)\n return result\n\n return result\n\n\ndef _varargs(*args):\n \"\"\"Encode var arguments to utf-8 or fs encoding.\"\"\"\n return [_handle_input(arg) for arg in args]\n\n\ndef _varkwargs(**kwargs):\n \"\"\"Encode var keyword arguments to utf-8.\"\"\"\n return {k: _handle_input(arg) for k, arg in kwargs.items()}\n\n\ndef make_closure(f, handle_arg=None, handle_output=None):\n \"\"\"Apply an input handler and output handler to a function.\n\n Used to ensure UTF-8 encoding at input and output.\n \"\"\"\n return patch_output(patch_input(f, handle_arg), handle_output)\n\n\ndef patch_input(f, handle_arg=None):\n \"\"\"Patch all args and kwargs of function f.\n\n If handle_arg is None, just return the original function.\n \"\"\"\n def patched_input(*args, **kwargs):\n return f(*[handle_arg(arg) for arg in args], **{k: handle_arg(arg) for k, arg in kwargs.items()})\n return patched_input if callable(handle_arg) else f\n\n\ndef patch_output(f, handle_output=None):\n \"\"\"Patch the output of function f with the handle_output function.\n\n If handle_output is None, just return the original function.\n \"\"\"\n def patched_output(*args, **kwargs):\n return handle_output(f(*args, **kwargs))\n return patched_output if callable(handle_output) else f\n\n\ndef initialize():\n \"\"\"Replace original functions if the fs encoding is not utf-8.\"\"\"\n if hasattr(sys, '_called_from_test'):\n return\n\n affected_functions = {\n certifi: ['where', 'old_where'],\n glob: ['glob'],\n io: ['open'],\n os: ['access', 'chdir', 'listdir', 'makedirs', 'mkdir', 'remove',\n 'rename', 'renames', 'rmdir', 'stat', 'unlink', 'utime', 'walk'],\n os.path: ['abspath', 'basename', 'dirname', 'exists', 'getctime', 'getmtime', 'getsize',\n 'isabs', 'isdir', 'isfile', 'islink', 'join', 'normcase', 'normpath', 'realpath', 'relpath',\n 'split', 'splitext'],\n shutil: ['copyfile', 'copymode', 'move', 'rmtree'],\n tarfile: ['is_tarfile'],\n }\n\n # pyOpenSSL 0.14-1 bug: it can't handle unicode input.\n # pyOpenSSL fix -> https://github.com/pyca/pyopenssl/pull/209\n # Our bug: https://github.com/pymedusa/Medusa/issues/1422\n handle_output_map = {\n certifi: _handle_output_b\n }\n\n if os.name 
!= 'nt':\n affected_functions[os].extend(['chmod', 'chown', 'link', 'statvfs', 'symlink'])\n\n if not fs_encoding or fs_encoding.lower() not in ('utf-8', 'mbcs'):\n handle_input = _handle_input\n else:\n handle_input = None\n\n for k, v in affected_functions.items():\n handle_output = handle_output_map.get(k, _handle_output_u)\n for f in v:\n setattr(k, f, make_closure(getattr(k, f), handle_input, handle_output))\n", "path": "medusa/init/filesystem.py"}]} | 2,777 | 180 |
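The diff and module source embedded in the record above fix the post-processor crash by routing `rarfile.is_rarfile` through the same argument-encoding wrapper Medusa already applies to `os`, `shutil`, and friends. A minimal standalone sketch of that wrapping pattern — simplified to Python 3 built-ins rather than Medusa's `six`-based helpers, so the helper names here are illustrative, not the project's API:

```python
# Sketch: re-bind a module function so text path arguments are encoded before
# any lower-level file API sees them; mirrors the patch_input()/make_closure()
# helpers shown in the record above.
import os
import sys

import rarfile

fs_encoding = sys.getfilesystemencoding()


def _encode_path(value):
    # Leave bytes and non-strings alone; encode text the way the init code does.
    if not isinstance(value, str):
        return value
    return value.encode('utf-8' if os.name != 'nt' else fs_encoding)


def _patch_input(func):
    def patched(*args, **kwargs):
        return func(*[_encode_path(a) for a in args],
                    **{k: _encode_path(v) for k, v in kwargs.items()})
    return patched


# The effect of adding rarfile to affected_functions: unicode archive names no
# longer blow up inside rarfile.is_rarfile.
rarfile.is_rarfile = _patch_input(rarfile.is_rarfile)
```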
gh_patches_debug_38883 | rasdani/github-patches | git_diff | pantsbuild__pants-6642 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Python style `checker` pex requires runtime resolution of whl
Post #6618 a new `whl` is published, which is then resolved at runtime to generate a pex that can be subprocessed to check style.
This represents a slightly different model than what occurs currently with plugins, because plugins/backends can be loaded from within a deploy pex. But the `checker` whl/pex (currently) require independent resolution.
To simplify pants release processes, it would be great to allow the `checker` whl to be resolved off the python-path of the pants process itself.
</issue>
<code>
[start of contrib/python/src/python/pants/contrib/python/checks/tasks/checkstyle/checkstyle.py]
1 # coding=utf-8
2 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
3 # Licensed under the Apache License, Version 2.0 (see LICENSE).
4
5 from __future__ import absolute_import, division, print_function, unicode_literals
6
7 import os
8
9 from pants.backend.python.python_requirement import PythonRequirement
10 from pants.backend.python.subsystems.python_repos import PythonRepos
11 from pants.backend.python.subsystems.python_setup import PythonSetup
12 from pants.backend.python.targets.python_requirement_library import PythonRequirementLibrary
13 from pants.backend.python.targets.python_target import PythonTarget
14 from pants.backend.python.tasks import pex_build_util
15 from pants.base.build_environment import get_buildroot, pants_version
16 from pants.base.exceptions import TaskError
17 from pants.base.hash_utils import hash_all
18 from pants.base.workunit import WorkUnitLabel
19 from pants.build_graph.address import Address
20 from pants.option.custom_types import file_option
21 from pants.task.lint_task_mixin import LintTaskMixin
22 from pants.task.task import Task
23 from pants.util.collections import factory_dict
24 from pants.util.contextutil import temporary_file
25 from pants.util.dirutil import safe_concurrent_creation
26 from pants.util.memo import memoized_classproperty, memoized_property
27 from pex.interpreter import PythonInterpreter
28 from pex.pex import PEX
29 from pex.pex_builder import PEXBuilder
30
31 from pants.contrib.python.checks.checker import checker
32 from pants.contrib.python.checks.tasks.checkstyle.plugin_subsystem_base import \
33 default_subsystem_for_plugin
34 from pants.contrib.python.checks.tasks.checkstyle.pycodestyle_subsystem import PyCodeStyleSubsystem
35 from pants.contrib.python.checks.tasks.checkstyle.pyflakes_subsystem import FlakeCheckSubsystem
36
37
38 class Checkstyle(LintTaskMixin, Task):
39 _PYTHON_SOURCE_EXTENSION = '.py'
40
41 _CUSTOM_PLUGIN_SUBSYSTEMS = (
42 PyCodeStyleSubsystem,
43 FlakeCheckSubsystem,
44 )
45
46 @memoized_classproperty
47 def plugin_subsystems(cls):
48 subsystem_type_by_plugin_type = factory_dict(default_subsystem_for_plugin)
49 subsystem_type_by_plugin_type.update((subsystem_type.plugin_type(), subsystem_type)
50 for subsystem_type in cls._CUSTOM_PLUGIN_SUBSYSTEMS)
51 return tuple(subsystem_type_by_plugin_type[plugin_type] for plugin_type in checker.plugins())
52
53 @classmethod
54 def subsystem_dependencies(cls):
55 return super(Task, cls).subsystem_dependencies() + cls.plugin_subsystems + (
56 # Needed implicitly by the pex_build_util functions we use.
57 PythonSetup, PythonRepos)
58
59 @classmethod
60 def prepare(cls, options, round_manager):
61 round_manager.require_data(PythonInterpreter)
62
63 @classmethod
64 def register_options(cls, register):
65 super(Checkstyle, cls).register_options(register)
66 register('--severity', fingerprint=True, default='COMMENT', type=str,
67 help='Only messages at this severity or higher are logged. [COMMENT WARNING ERROR].')
68 register('--strict', fingerprint=True, type=bool,
69 help='If enabled, have non-zero exit status for any nit at WARNING or higher.')
70 register('--suppress', fingerprint=True, type=file_option, default=None,
71 help='Takes a text file where specific rules on specific files will be skipped.')
72 register('--fail', fingerprint=True, default=True, type=bool,
73 help='Prevent test failure but still produce output for problems.')
74
75 def _is_checked(self, target):
76 return (not target.is_synthetic and isinstance(target, PythonTarget) and
77 target.has_sources(self._PYTHON_SOURCE_EXTENSION))
78
79 _CHECKER_ADDRESS_SPEC = 'contrib/python/src/python/pants/contrib/python/checks/checker'
80 _CHECKER_REQ = 'pantsbuild.pants.contrib.python.checks.checker=={}'.format(pants_version())
81 _CHECKER_ENTRYPOINT = 'pants.contrib.python.checks.checker.checker:main'
82
83 @memoized_property
84 def checker_target(self):
85 self.context.resolve(self._CHECKER_ADDRESS_SPEC)
86 return self.context.build_graph.get_target(Address.parse(self._CHECKER_ADDRESS_SPEC))
87
88 def checker_pex(self, interpreter):
89 # TODO(John Sirois): Formalize in pants.base?
90 pants_dev_mode = os.environ.get('PANTS_DEV')
91
92 if pants_dev_mode:
93 checker_id = self.checker_target.transitive_invalidation_hash()
94 else:
95 checker_id = hash_all([self._CHECKER_REQ])
96
97 pex_path = os.path.join(self.workdir, 'checker', checker_id, str(interpreter.identity))
98 if not os.path.exists(pex_path):
99 with self.context.new_workunit(name='build-checker'):
100 with safe_concurrent_creation(pex_path) as chroot:
101 builder = PEXBuilder(path=chroot, interpreter=interpreter)
102
103 if pants_dev_mode:
104 pex_build_util.dump_sources(builder, tgt=self.checker_target, log=self.context.log)
105 req_libs = [tgt for tgt in self.checker_target.closure()
106 if isinstance(tgt, PythonRequirementLibrary)]
107 pex_build_util.dump_requirement_libs(builder,
108 interpreter=interpreter,
109 req_libs=req_libs,
110 log=self.context.log)
111 else:
112 pex_build_util.dump_requirements(builder,
113 interpreter=interpreter,
114 reqs=[PythonRequirement(self._CHECKER_REQ)],
115 log=self.context.log)
116 builder.set_entry_point(self._CHECKER_ENTRYPOINT)
117 builder.freeze()
118
119 return PEX(pex_path, interpreter=interpreter)
120
121 def checkstyle(self, interpreter, sources):
122 """Iterate over sources and run checker on each file.
123
124 Files can be suppressed with a --suppress option which takes an xml file containing
125 file paths that have exceptions and the plugins they need to ignore.
126
127 :param sources: iterable containing source file names.
128 :return: (int) number of failures
129 """
130 checker = self.checker_pex(interpreter)
131
132 args = [
133 '--root-dir={}'.format(get_buildroot()),
134 '--severity={}'.format(self.get_options().severity),
135 ]
136 if self.get_options().suppress:
137 args.append('--suppress={}'.format(self.get_options().suppress))
138 if self.get_options().strict:
139 args.append('--strict')
140
141 with temporary_file(binary_mode=False) as argfile:
142 for plugin_subsystem in self.plugin_subsystems:
143 options_blob = plugin_subsystem.global_instance().options_blob()
144 if options_blob:
145 argfile.write('--{}-options={}\n'.format(plugin_subsystem.plugin_type().name(),
146 options_blob))
147 argfile.write('\n'.join(sources))
148 argfile.close()
149
150 args.append('@{}'.format(argfile.name))
151
152 with self.context.new_workunit(name='pythonstyle',
153 labels=[WorkUnitLabel.TOOL, WorkUnitLabel.LINT],
154 cmd=' '.join(checker.cmdline(args))) as workunit:
155 failure_count = checker.run(args=args,
156 stdout=workunit.output('stdout'),
157 stderr=workunit.output('stderr'))
158 if failure_count > 0 and self.get_options().fail:
159 raise TaskError('{} Python Style issues found. You may try `./pants fmt <targets>`'
160 .format(failure_count))
161 return failure_count
162
163 def execute(self):
164 """"Run Checkstyle on all found non-synthetic source files."""
165
166 # If we are linting for Python 3, skip lint altogether.
167 # Long-term Python 3 linting solution tracked by:
168 # https://github.com/pantsbuild/pants/issues/5764
169 interpreter = self.context.products.get_data(PythonInterpreter)
170 # Check interpreter is not 'None' for test cases that do not
171 # run the python interpreter selection task.
172 if interpreter and interpreter.version >= (3, 0, 0):
173 self.context.log.info('Linting is currently disabled for Python 3 targets.\n '
174 'See https://github.com/pantsbuild/pants/issues/5764 for '
175 'long-term solution tracking.')
176 return
177
178 with self.invalidated(self.get_targets(self._is_checked)) as invalidation_check:
179 sources = self.calculate_sources([vt.target for vt in invalidation_check.invalid_vts])
180 if sources:
181 return self.checkstyle(interpreter, sources)
182
183 def calculate_sources(self, targets):
184 """Generate a set of source files from the given targets."""
185 sources = set()
186 for target in targets:
187 sources.update(
188 source for source in target.sources_relative_to_buildroot()
189 if source.endswith(self._PYTHON_SOURCE_EXTENSION)
190 )
191 return sources
192
[end of contrib/python/src/python/pants/contrib/python/checks/tasks/checkstyle/checkstyle.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/contrib/python/src/python/pants/contrib/python/checks/tasks/checkstyle/checkstyle.py b/contrib/python/src/python/pants/contrib/python/checks/tasks/checkstyle/checkstyle.py
--- a/contrib/python/src/python/pants/contrib/python/checks/tasks/checkstyle/checkstyle.py
+++ b/contrib/python/src/python/pants/contrib/python/checks/tasks/checkstyle/checkstyle.py
@@ -5,6 +5,7 @@
from __future__ import absolute_import, division, print_function, unicode_literals
import os
+import sys
from pants.backend.python.python_requirement import PythonRequirement
from pants.backend.python.subsystems.python_repos import PythonRepos
@@ -27,6 +28,7 @@
from pex.interpreter import PythonInterpreter
from pex.pex import PEX
from pex.pex_builder import PEXBuilder
+from pkg_resources import DistributionNotFound, Requirement, WorkingSet
from pants.contrib.python.checks.checker import checker
from pants.contrib.python.checks.tasks.checkstyle.plugin_subsystem_base import \
@@ -56,6 +58,10 @@
# Needed implicitly by the pex_build_util functions we use.
PythonSetup, PythonRepos)
+ @classmethod
+ def implementation_version(cls):
+ return super(Checkstyle, cls).implementation_version() + [('Checkstyle', 0)]
+
@classmethod
def prepare(cls, options, round_manager):
round_manager.require_data(PythonInterpreter)
@@ -109,10 +115,21 @@
req_libs=req_libs,
log=self.context.log)
else:
- pex_build_util.dump_requirements(builder,
- interpreter=interpreter,
- reqs=[PythonRequirement(self._CHECKER_REQ)],
- log=self.context.log)
+ try:
+ # The checker is already on sys.path, eg: embedded in pants.pex.
+ working_set = WorkingSet(entries=sys.path)
+ for dist in working_set.resolve([Requirement.parse(self._CHECKER_REQ)]):
+ for req in dist.requires():
+ builder.add_requirement(req)
+ builder.add_distribution(dist)
+ builder.add_requirement(self._CHECKER_REQ)
+ except DistributionNotFound:
+ # We need to resolve the checker from a local or remote distribution repo.
+ pex_build_util.dump_requirements(builder,
+ interpreter=interpreter,
+ reqs=[PythonRequirement(self._CHECKER_REQ)],
+ log=self.context.log)
+
builder.set_entry_point(self._CHECKER_ENTRYPOINT)
builder.freeze()
| {"golden_diff": "diff --git a/contrib/python/src/python/pants/contrib/python/checks/tasks/checkstyle/checkstyle.py b/contrib/python/src/python/pants/contrib/python/checks/tasks/checkstyle/checkstyle.py\n--- a/contrib/python/src/python/pants/contrib/python/checks/tasks/checkstyle/checkstyle.py\n+++ b/contrib/python/src/python/pants/contrib/python/checks/tasks/checkstyle/checkstyle.py\n@@ -5,6 +5,7 @@\n from __future__ import absolute_import, division, print_function, unicode_literals\n \n import os\n+import sys\n \n from pants.backend.python.python_requirement import PythonRequirement\n from pants.backend.python.subsystems.python_repos import PythonRepos\n@@ -27,6 +28,7 @@\n from pex.interpreter import PythonInterpreter\n from pex.pex import PEX\n from pex.pex_builder import PEXBuilder\n+from pkg_resources import DistributionNotFound, Requirement, WorkingSet\n \n from pants.contrib.python.checks.checker import checker\n from pants.contrib.python.checks.tasks.checkstyle.plugin_subsystem_base import \\\n@@ -56,6 +58,10 @@\n # Needed implicitly by the pex_build_util functions we use.\n PythonSetup, PythonRepos)\n \n+ @classmethod\n+ def implementation_version(cls):\n+ return super(Checkstyle, cls).implementation_version() + [('Checkstyle', 0)]\n+\n @classmethod\n def prepare(cls, options, round_manager):\n round_manager.require_data(PythonInterpreter)\n@@ -109,10 +115,21 @@\n req_libs=req_libs,\n log=self.context.log)\n else:\n- pex_build_util.dump_requirements(builder,\n- interpreter=interpreter,\n- reqs=[PythonRequirement(self._CHECKER_REQ)],\n- log=self.context.log)\n+ try:\n+ # The checker is already on sys.path, eg: embedded in pants.pex.\n+ working_set = WorkingSet(entries=sys.path)\n+ for dist in working_set.resolve([Requirement.parse(self._CHECKER_REQ)]):\n+ for req in dist.requires():\n+ builder.add_requirement(req)\n+ builder.add_distribution(dist)\n+ builder.add_requirement(self._CHECKER_REQ)\n+ except DistributionNotFound:\n+ # We need to resolve the checker from a local or remote distribution repo.\n+ pex_build_util.dump_requirements(builder,\n+ interpreter=interpreter,\n+ reqs=[PythonRequirement(self._CHECKER_REQ)],\n+ log=self.context.log)\n+\n builder.set_entry_point(self._CHECKER_ENTRYPOINT)\n builder.freeze()\n", "issue": "Python style `checker` pex requires runtime resolution of whl\nPost #6618 a new `whl` is published, which is then resolved at runtime to generate a pex that can be subprocessed to check style.\r\n\r\nThis represents a slightly different model than what occurs currently with plugins, because plugins/backends can be loaded from within a deploy pex. 
But the `checker` whl/pex (currently) require independent resolution.\r\n\r\nTo simplify pants release processes, it would be great to allow the `checker` whl to be resolved off the python-path of the pants process itself.\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\n\nfrom pants.backend.python.python_requirement import PythonRequirement\nfrom pants.backend.python.subsystems.python_repos import PythonRepos\nfrom pants.backend.python.subsystems.python_setup import PythonSetup\nfrom pants.backend.python.targets.python_requirement_library import PythonRequirementLibrary\nfrom pants.backend.python.targets.python_target import PythonTarget\nfrom pants.backend.python.tasks import pex_build_util\nfrom pants.base.build_environment import get_buildroot, pants_version\nfrom pants.base.exceptions import TaskError\nfrom pants.base.hash_utils import hash_all\nfrom pants.base.workunit import WorkUnitLabel\nfrom pants.build_graph.address import Address\nfrom pants.option.custom_types import file_option\nfrom pants.task.lint_task_mixin import LintTaskMixin\nfrom pants.task.task import Task\nfrom pants.util.collections import factory_dict\nfrom pants.util.contextutil import temporary_file\nfrom pants.util.dirutil import safe_concurrent_creation\nfrom pants.util.memo import memoized_classproperty, memoized_property\nfrom pex.interpreter import PythonInterpreter\nfrom pex.pex import PEX\nfrom pex.pex_builder import PEXBuilder\n\nfrom pants.contrib.python.checks.checker import checker\nfrom pants.contrib.python.checks.tasks.checkstyle.plugin_subsystem_base import \\\n default_subsystem_for_plugin\nfrom pants.contrib.python.checks.tasks.checkstyle.pycodestyle_subsystem import PyCodeStyleSubsystem\nfrom pants.contrib.python.checks.tasks.checkstyle.pyflakes_subsystem import FlakeCheckSubsystem\n\n\nclass Checkstyle(LintTaskMixin, Task):\n _PYTHON_SOURCE_EXTENSION = '.py'\n\n _CUSTOM_PLUGIN_SUBSYSTEMS = (\n PyCodeStyleSubsystem,\n FlakeCheckSubsystem,\n )\n\n @memoized_classproperty\n def plugin_subsystems(cls):\n subsystem_type_by_plugin_type = factory_dict(default_subsystem_for_plugin)\n subsystem_type_by_plugin_type.update((subsystem_type.plugin_type(), subsystem_type)\n for subsystem_type in cls._CUSTOM_PLUGIN_SUBSYSTEMS)\n return tuple(subsystem_type_by_plugin_type[plugin_type] for plugin_type in checker.plugins())\n\n @classmethod\n def subsystem_dependencies(cls):\n return super(Task, cls).subsystem_dependencies() + cls.plugin_subsystems + (\n # Needed implicitly by the pex_build_util functions we use.\n PythonSetup, PythonRepos)\n\n @classmethod\n def prepare(cls, options, round_manager):\n round_manager.require_data(PythonInterpreter)\n\n @classmethod\n def register_options(cls, register):\n super(Checkstyle, cls).register_options(register)\n register('--severity', fingerprint=True, default='COMMENT', type=str,\n help='Only messages at this severity or higher are logged. 
[COMMENT WARNING ERROR].')\n register('--strict', fingerprint=True, type=bool,\n help='If enabled, have non-zero exit status for any nit at WARNING or higher.')\n register('--suppress', fingerprint=True, type=file_option, default=None,\n help='Takes a text file where specific rules on specific files will be skipped.')\n register('--fail', fingerprint=True, default=True, type=bool,\n help='Prevent test failure but still produce output for problems.')\n\n def _is_checked(self, target):\n return (not target.is_synthetic and isinstance(target, PythonTarget) and\n target.has_sources(self._PYTHON_SOURCE_EXTENSION))\n\n _CHECKER_ADDRESS_SPEC = 'contrib/python/src/python/pants/contrib/python/checks/checker'\n _CHECKER_REQ = 'pantsbuild.pants.contrib.python.checks.checker=={}'.format(pants_version())\n _CHECKER_ENTRYPOINT = 'pants.contrib.python.checks.checker.checker:main'\n\n @memoized_property\n def checker_target(self):\n self.context.resolve(self._CHECKER_ADDRESS_SPEC)\n return self.context.build_graph.get_target(Address.parse(self._CHECKER_ADDRESS_SPEC))\n\n def checker_pex(self, interpreter):\n # TODO(John Sirois): Formalize in pants.base?\n pants_dev_mode = os.environ.get('PANTS_DEV')\n\n if pants_dev_mode:\n checker_id = self.checker_target.transitive_invalidation_hash()\n else:\n checker_id = hash_all([self._CHECKER_REQ])\n\n pex_path = os.path.join(self.workdir, 'checker', checker_id, str(interpreter.identity))\n if not os.path.exists(pex_path):\n with self.context.new_workunit(name='build-checker'):\n with safe_concurrent_creation(pex_path) as chroot:\n builder = PEXBuilder(path=chroot, interpreter=interpreter)\n\n if pants_dev_mode:\n pex_build_util.dump_sources(builder, tgt=self.checker_target, log=self.context.log)\n req_libs = [tgt for tgt in self.checker_target.closure()\n if isinstance(tgt, PythonRequirementLibrary)]\n pex_build_util.dump_requirement_libs(builder,\n interpreter=interpreter,\n req_libs=req_libs,\n log=self.context.log)\n else:\n pex_build_util.dump_requirements(builder,\n interpreter=interpreter,\n reqs=[PythonRequirement(self._CHECKER_REQ)],\n log=self.context.log)\n builder.set_entry_point(self._CHECKER_ENTRYPOINT)\n builder.freeze()\n\n return PEX(pex_path, interpreter=interpreter)\n\n def checkstyle(self, interpreter, sources):\n \"\"\"Iterate over sources and run checker on each file.\n\n Files can be suppressed with a --suppress option which takes an xml file containing\n file paths that have exceptions and the plugins they need to ignore.\n\n :param sources: iterable containing source file names.\n :return: (int) number of failures\n \"\"\"\n checker = self.checker_pex(interpreter)\n\n args = [\n '--root-dir={}'.format(get_buildroot()),\n '--severity={}'.format(self.get_options().severity),\n ]\n if self.get_options().suppress:\n args.append('--suppress={}'.format(self.get_options().suppress))\n if self.get_options().strict:\n args.append('--strict')\n\n with temporary_file(binary_mode=False) as argfile:\n for plugin_subsystem in self.plugin_subsystems:\n options_blob = plugin_subsystem.global_instance().options_blob()\n if options_blob:\n argfile.write('--{}-options={}\\n'.format(plugin_subsystem.plugin_type().name(),\n options_blob))\n argfile.write('\\n'.join(sources))\n argfile.close()\n\n args.append('@{}'.format(argfile.name))\n\n with self.context.new_workunit(name='pythonstyle',\n labels=[WorkUnitLabel.TOOL, WorkUnitLabel.LINT],\n cmd=' '.join(checker.cmdline(args))) as workunit:\n failure_count = checker.run(args=args,\n 
stdout=workunit.output('stdout'),\n stderr=workunit.output('stderr'))\n if failure_count > 0 and self.get_options().fail:\n raise TaskError('{} Python Style issues found. You may try `./pants fmt <targets>`'\n .format(failure_count))\n return failure_count\n\n def execute(self):\n \"\"\"\"Run Checkstyle on all found non-synthetic source files.\"\"\"\n\n # If we are linting for Python 3, skip lint altogether.\n # Long-term Python 3 linting solution tracked by:\n # https://github.com/pantsbuild/pants/issues/5764\n interpreter = self.context.products.get_data(PythonInterpreter)\n # Check interpreter is not 'None' for test cases that do not\n # run the python interpreter selection task.\n if interpreter and interpreter.version >= (3, 0, 0):\n self.context.log.info('Linting is currently disabled for Python 3 targets.\\n '\n 'See https://github.com/pantsbuild/pants/issues/5764 for '\n 'long-term solution tracking.')\n return\n\n with self.invalidated(self.get_targets(self._is_checked)) as invalidation_check:\n sources = self.calculate_sources([vt.target for vt in invalidation_check.invalid_vts])\n if sources:\n return self.checkstyle(interpreter, sources)\n\n def calculate_sources(self, targets):\n \"\"\"Generate a set of source files from the given targets.\"\"\"\n sources = set()\n for target in targets:\n sources.update(\n source for source in target.sources_relative_to_buildroot()\n if source.endswith(self._PYTHON_SOURCE_EXTENSION)\n )\n return sources\n", "path": "contrib/python/src/python/pants/contrib/python/checks/tasks/checkstyle/checkstyle.py"}]} | 2,957 | 547 |
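The golden diff above avoids a separate runtime resolve by first looking for the checker distribution on the running pants process's own `sys.path` via `pkg_resources`, and only falling back to `dump_requirements` when it is not found. A minimal standalone sketch of that lookup — the requirement string and version below are placeholders, not pinned pants values:

```python
# Sketch only: resolve a requirement from the current process's sys.path,
# signalling the caller to fall back to a remote resolve when it is absent.
import sys

from pkg_resources import DistributionNotFound, Requirement, WorkingSet

CHECKER_REQ = 'pantsbuild.pants.contrib.python.checks.checker==1.11.0'  # placeholder version


def resolve_from_sys_path(requirement_string):
    working_set = WorkingSet(entries=sys.path)
    try:
        return working_set.resolve([Requirement.parse(requirement_string)])
    except DistributionNotFound:
        return None  # not importable here; resolve from a distribution repo instead


dists = resolve_from_sys_path(CHECKER_REQ)
```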
gh_patches_debug_26284 | rasdani/github-patches | git_diff | python-poetry__poetry-123 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
poetry script: unable to find entry point module, if package resides in src
`poetry script` is unable to find the entry point module in `src`
```
[NameError]
name 'Module' is not defined
script <script-name> [<args>]...
```
</issue>
<code>
[start of poetry/console/commands/script.py]
1 import sys
2
3 from .venv_command import VenvCommand
4
5
6 class ScriptCommand(VenvCommand):
7 """
8 Executes a script defined in <comment>pyproject.toml</comment>
9
10 script
11 { script-name : The name of the script to execute }
12 { args?* : The command and arguments/options to pass to the script. }
13 """
14
15 def handle(self):
16 script = self.argument('script-name')
17 argv = [script] + self.argument('args')
18
19 scripts = self.poetry.local_config.get('scripts')
20 if not scripts:
21 raise RuntimeError('No scripts defined in pyproject.toml')
22
23 if script not in scripts:
24 raise ValueError('Script {} is not defined'.format(script))
25
26 module, callable_ = scripts[script].split(':')
27
28 cmd = ['python', '-c']
29
30 cmd += [
31 '"import sys; '
32 'from importlib import import_module; '
33 'sys.argv = {!r}; '
34 'import_module(\'{}\').{}()"'.format(
35 argv, module, callable_
36 )
37 ]
38
39 self.venv.run(*cmd, shell=True, call=True)
40
41 def merge_application_definition(self, merge_args=True):
42 if self._application is None \
43 or (self._application_definition_merged
44 and (self._application_definition_merged_with_args or not merge_args)):
45 return
46
47 if merge_args:
48 current_arguments = self._definition.get_arguments()
49 self._definition.set_arguments(self._application.get_definition().get_arguments())
50 self._definition.add_arguments(current_arguments)
51
52 self._application_definition_merged = True
53 if merge_args:
54 self._application_definition_merged_with_args = True
55
[end of poetry/console/commands/script.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/poetry/console/commands/script.py b/poetry/console/commands/script.py
--- a/poetry/console/commands/script.py
+++ b/poetry/console/commands/script.py
@@ -1,5 +1,6 @@
import sys
+from ...masonry.utils.module import Module
from .venv_command import VenvCommand
@@ -25,19 +26,32 @@
module, callable_ = scripts[script].split(':')
+ src_in_sys_path = 'sys.path.append(\'src\'); '\
+ if self._module.is_in_src() else ''
+
cmd = ['python', '-c']
cmd += [
'"import sys; '
'from importlib import import_module; '
- 'sys.argv = {!r}; '
+ 'sys.argv = {!r}; {}'
'import_module(\'{}\').{}()"'.format(
- argv, module, callable_
+ argv, src_in_sys_path, module, callable_
)
]
self.venv.run(*cmd, shell=True, call=True)
+ @property
+ def _module(self):
+ poetry = self.poetry
+ package = poetry.package
+ path = poetry.file.parent
+ module = Module(
+ package.name, path.as_posix()
+ )
+ return module
+
def merge_application_definition(self, merge_args=True):
if self._application is None \
or (self._application_definition_merged
| {"golden_diff": "diff --git a/poetry/console/commands/script.py b/poetry/console/commands/script.py\n--- a/poetry/console/commands/script.py\n+++ b/poetry/console/commands/script.py\n@@ -1,5 +1,6 @@\n import sys\n \n+from ...masonry.utils.module import Module\n from .venv_command import VenvCommand\n \n \n@@ -25,19 +26,32 @@\n \n module, callable_ = scripts[script].split(':')\n \n+ src_in_sys_path = 'sys.path.append(\\'src\\'); '\\\n+ if self._module.is_in_src() else ''\n+\n cmd = ['python', '-c']\n \n cmd += [\n '\"import sys; '\n 'from importlib import import_module; '\n- 'sys.argv = {!r}; '\n+ 'sys.argv = {!r}; {}'\n 'import_module(\\'{}\\').{}()\"'.format(\n- argv, module, callable_\n+ argv, src_in_sys_path, module, callable_\n )\n ]\n \n self.venv.run(*cmd, shell=True, call=True)\n \n+ @property\n+ def _module(self):\n+ poetry = self.poetry\n+ package = poetry.package\n+ path = poetry.file.parent\n+ module = Module(\n+ package.name, path.as_posix()\n+ )\n+ return module\n+\n def merge_application_definition(self, merge_args=True):\n if self._application is None \\\n or (self._application_definition_merged\n", "issue": "poetry script: unable to find entry point module, if package resides in src\n`poetry script` is unable to find the entry point module in `src`\r\n\r\n```\r\n[NameError]\r\nname 'Module' is not defined\r\nscript <script-name> [<args>]...\r\n```\n", "before_files": [{"content": "import sys\n\nfrom .venv_command import VenvCommand\n\n\nclass ScriptCommand(VenvCommand):\n \"\"\"\n Executes a script defined in <comment>pyproject.toml</comment>\n\n script\n { script-name : The name of the script to execute }\n { args?* : The command and arguments/options to pass to the script. }\n \"\"\"\n\n def handle(self):\n script = self.argument('script-name')\n argv = [script] + self.argument('args')\n\n scripts = self.poetry.local_config.get('scripts')\n if not scripts:\n raise RuntimeError('No scripts defined in pyproject.toml')\n\n if script not in scripts:\n raise ValueError('Script {} is not defined'.format(script))\n\n module, callable_ = scripts[script].split(':')\n\n cmd = ['python', '-c']\n\n cmd += [\n '\"import sys; '\n 'from importlib import import_module; '\n 'sys.argv = {!r}; '\n 'import_module(\\'{}\\').{}()\"'.format(\n argv, module, callable_\n )\n ]\n\n self.venv.run(*cmd, shell=True, call=True)\n\n def merge_application_definition(self, merge_args=True):\n if self._application is None \\\n or (self._application_definition_merged\n and (self._application_definition_merged_with_args or not merge_args)):\n return\n\n if merge_args:\n current_arguments = self._definition.get_arguments()\n self._definition.set_arguments(self._application.get_definition().get_arguments())\n self._definition.add_arguments(current_arguments)\n\n self._application_definition_merged = True\n if merge_args:\n self._application_definition_merged_with_args = True\n", "path": "poetry/console/commands/script.py"}]} | 1,061 | 330 |
gh_patches_debug_1682 | rasdani/github-patches | git_diff | pymedusa__Medusa-9273 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
IPT Provider error
Hello, it has been about a month since I started having this problem.
Medusa is unable to use IPTorrents to search and download; it always worked perfectly until one day it stopped. I have double-checked the cookie values and they are an exact match.
Can anyone help me? Here's the log with the error:
2021-02-09 16:18:43 INFO FORCEDSEARCHQUEUE-MANUAL-364928 :: [2ab9d45] Unable to find manual results for: Snowpiercer - S02E02 - Smolder to Life
2021-02-09 16:18:43 INFO FORCEDSEARCHQUEUE-MANUAL-364928 :: IPTorrents :: [2ab9d45] Performing season pack search for Snowpiercer
2021-02-09 16:18:43 WARNING FORCEDSEARCHQUEUE-MANUAL-364928 :: IPTorrents :: [2ab9d45] Please configure the required cookies for this provider. Check your provider settings
2021-02-09 16:18:43 INFO FORCEDSEARCHQUEUE-MANUAL-364928 :: IPTorrents :: [2ab9d45] Unknown exception in url https://iptorrents.eu Error: Cloudflare IUAM possibility malformed, issue extracing delay value.
2021-02-09 16:18:43 INFO FORCEDSEARCHQUEUE-MANUAL-364928 :: IPTorrents :: [2ab9d45] Performing episode search for Snowpiercer
Could it be because it's using iptorrents.eu instead of iptorrents.com?
</issue>
<code>
[start of medusa/providers/torrent/html/iptorrents.py]
1 # coding=utf-8
2
3 """Provider code for IPTorrents."""
4
5 from __future__ import unicode_literals
6
7 import logging
8 import re
9
10 from medusa import tv
11 from medusa.bs4_parser import BS4Parser
12 from medusa.helper.common import convert_size
13 from medusa.logger.adapters.style import BraceAdapter
14 from medusa.providers.torrent.torrent_provider import TorrentProvider
15
16 from requests.compat import urljoin
17
18 log = BraceAdapter(logging.getLogger(__name__))
19 log.logger.addHandler(logging.NullHandler())
20
21
22 class IPTorrentsProvider(TorrentProvider):
23 """IPTorrents Torrent provider."""
24
25 def __init__(self):
26 """Initialize the class."""
27 super(IPTorrentsProvider, self).__init__('IPTorrents')
28
29 # URLs
30 self.url = 'https://iptorrents.eu'
31 self.urls = {
32 'base_url': self.url,
33 'login': urljoin(self.url, 'torrents'),
34 'search': urljoin(self.url, 't?%s%s&q=%s&qf=#torrents'),
35 }
36
37 # Proper Strings
38
39 # Miscellaneous Options
40 self.freeleech = False
41 self.enable_cookies = True
42 self.cookies = ''
43 self.required_cookies = ('uid', 'pass')
44 self.categories = '73=&60='
45
46 # Cache
47 self.cache = tv.Cache(self)
48
49 def search(self, search_strings, age=0, ep_obj=None, **kwargs):
50 """
51 Search a provider and parse the results.
52
53 :param search_strings: A dict with mode (key) and the search value (value)
54 :param age: Not used
55 :param ep_obj: Not used
56 :returns: A list of search results (structure)
57 """
58 results = []
59 if not self.login():
60 return results
61
62 freeleech = '&free=on' if self.freeleech else ''
63
64 for mode in search_strings:
65 log.debug('Search mode: {0}', mode)
66
67 for search_string in search_strings[mode]:
68
69 if mode != 'RSS':
70 log.debug('Search string: {search}',
71 {'search': search_string})
72
73 # URL with 50 tv-show results, or max 150 if adjusted in IPTorrents profile
74 search_url = self.urls['search'] % (self.categories, freeleech, search_string)
75 search_url += ';o=seeders' if mode != 'RSS' else ''
76
77 response = self.session.get(search_url)
78 if not response or not response.text:
79 log.debug('No data returned from provider')
80 continue
81
82 data = re.sub(r'(?im)<button.+?<[/]button>', '', response.text, 0)
83
84 results += self.parse(data, mode)
85
86 return results
87
88 def parse(self, data, mode):
89 """
90 Parse search results for items.
91
92 :param data: The raw response from a search
93 :param mode: The current mode used to search, e.g. RSS
94
95 :return: A list of items found
96 """
97 items = []
98
99 with BS4Parser(data, 'html5lib') as html:
100 torrent_table = html.find('table', id='torrents')
101 torrents = torrent_table('tr') if torrent_table else []
102
103 # Continue only if at least one release is found
104 if len(torrents) < 2 or html.find(text='No Torrents Found!'):
105 log.debug('Data returned from provider does not contain any torrents')
106 return items
107
108 # Skip column headers
109 for row in torrents[1:]:
110 try:
111 table_data = row('td')
112 title = table_data[1].find('a').text
113 download_url = self.urls['base_url'] + table_data[3].find('a')['href']
114 if not all([title, download_url]):
115 continue
116
117 seeders = int(table_data[7].text)
118 leechers = int(table_data[8].text)
119
120 # Filter unseeded torrent
121 if seeders < self.minseed:
122 if mode != 'RSS':
123 log.debug("Discarding torrent because it doesn't meet the"
124 ' minimum seeders: {0}. Seeders: {1}',
125 title, seeders)
126 continue
127
128 torrent_size = table_data[5].text
129 size = convert_size(torrent_size) or -1
130
131 pubdate_raw = table_data[1].find('div').get_text().split('|')[-1].strip()
132 pubdate = self.parse_pubdate(pubdate_raw, human_time=True)
133
134 item = {
135 'title': title,
136 'link': download_url,
137 'size': size,
138 'seeders': seeders,
139 'leechers': leechers,
140 'pubdate': pubdate,
141 }
142 if mode != 'RSS':
143 log.debug('Found result: {0} with {1} seeders and {2} leechers',
144 title, seeders, leechers)
145
146 items.append(item)
147 except (AttributeError, TypeError, KeyError, ValueError, IndexError):
148 log.exception('Failed parsing provider.')
149
150 return items
151
152 def login(self):
153 """Login method used for logging in before doing search and torrent downloads."""
154 return self.cookie_login('sign in')
155
156
157 provider = IPTorrentsProvider()
158
[end of medusa/providers/torrent/html/iptorrents.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/medusa/providers/torrent/html/iptorrents.py b/medusa/providers/torrent/html/iptorrents.py
--- a/medusa/providers/torrent/html/iptorrents.py
+++ b/medusa/providers/torrent/html/iptorrents.py
@@ -42,6 +42,7 @@
self.cookies = ''
self.required_cookies = ('uid', 'pass')
self.categories = '73=&60='
+ self.custom_url = None
# Cache
self.cache = tv.Cache(self)
| {"golden_diff": "diff --git a/medusa/providers/torrent/html/iptorrents.py b/medusa/providers/torrent/html/iptorrents.py\n--- a/medusa/providers/torrent/html/iptorrents.py\n+++ b/medusa/providers/torrent/html/iptorrents.py\n@@ -42,6 +42,7 @@\n self.cookies = ''\n self.required_cookies = ('uid', 'pass')\n self.categories = '73=&60='\n+ self.custom_url = None\n \n # Cache\n self.cache = tv.Cache(self)\n", "issue": "IPT Provider error\nHello, it has been like a month since I am having this problem.\r\nMedusa is unable to use IPTorrents to search and download, it always worked perfect until one day. I have double check the cookie values and they are an exact match.\r\nAnyone can help me? here's the log with the error\r\n\r\n2021-02-09 16:18:43 INFO\tFORCEDSEARCHQUEUE-MANUAL-364928 :: [2ab9d45] Unable to find manual results for: Snowpiercer - S02E02 - Smolder to Life\r\n2021-02-09 16:18:43 INFO\tFORCEDSEARCHQUEUE-MANUAL-364928 :: IPTorrents :: [2ab9d45] Performing season pack search for Snowpiercer\r\n2021-02-09 16:18:43 WARNING\tFORCEDSEARCHQUEUE-MANUAL-364928 :: IPTorrents :: [2ab9d45] Please configure the required cookies for this provider. Check your provider settings\r\n2021-02-09 16:18:43 INFO\tFORCEDSEARCHQUEUE-MANUAL-364928 :: IPTorrents :: [2ab9d45] Unknown exception in url https://iptorrents.eu Error: Cloudflare IUAM possibility malformed, issue extracing delay value.\r\n2021-02-09 16:18:43 INFO\tFORCEDSEARCHQUEUE-MANUAL-364928 :: IPTorrents :: [2ab9d45] Performing episode search for Snowpiercer\r\n\r\nCould it be because it's using iptorrents.eu instead of iptorrents.com?\n", "before_files": [{"content": "# coding=utf-8\n\n\"\"\"Provider code for IPTorrents.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport logging\nimport re\n\nfrom medusa import tv\nfrom medusa.bs4_parser import BS4Parser\nfrom medusa.helper.common import convert_size\nfrom medusa.logger.adapters.style import BraceAdapter\nfrom medusa.providers.torrent.torrent_provider import TorrentProvider\n\nfrom requests.compat import urljoin\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nclass IPTorrentsProvider(TorrentProvider):\n \"\"\"IPTorrents Torrent provider.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the class.\"\"\"\n super(IPTorrentsProvider, self).__init__('IPTorrents')\n\n # URLs\n self.url = 'https://iptorrents.eu'\n self.urls = {\n 'base_url': self.url,\n 'login': urljoin(self.url, 'torrents'),\n 'search': urljoin(self.url, 't?%s%s&q=%s&qf=#torrents'),\n }\n\n # Proper Strings\n\n # Miscellaneous Options\n self.freeleech = False\n self.enable_cookies = True\n self.cookies = ''\n self.required_cookies = ('uid', 'pass')\n self.categories = '73=&60='\n\n # Cache\n self.cache = tv.Cache(self)\n\n def search(self, search_strings, age=0, ep_obj=None, **kwargs):\n \"\"\"\n Search a provider and parse the results.\n\n :param search_strings: A dict with mode (key) and the search value (value)\n :param age: Not used\n :param ep_obj: Not used\n :returns: A list of search results (structure)\n \"\"\"\n results = []\n if not self.login():\n return results\n\n freeleech = '&free=on' if self.freeleech else ''\n\n for mode in search_strings:\n log.debug('Search mode: {0}', mode)\n\n for search_string in search_strings[mode]:\n\n if mode != 'RSS':\n log.debug('Search string: {search}',\n {'search': search_string})\n\n # URL with 50 tv-show results, or max 150 if adjusted in IPTorrents profile\n search_url = self.urls['search'] % (self.categories, freeleech, 
search_string)\n search_url += ';o=seeders' if mode != 'RSS' else ''\n\n response = self.session.get(search_url)\n if not response or not response.text:\n log.debug('No data returned from provider')\n continue\n\n data = re.sub(r'(?im)<button.+?<[/]button>', '', response.text, 0)\n\n results += self.parse(data, mode)\n\n return results\n\n def parse(self, data, mode):\n \"\"\"\n Parse search results for items.\n\n :param data: The raw response from a search\n :param mode: The current mode used to search, e.g. RSS\n\n :return: A list of items found\n \"\"\"\n items = []\n\n with BS4Parser(data, 'html5lib') as html:\n torrent_table = html.find('table', id='torrents')\n torrents = torrent_table('tr') if torrent_table else []\n\n # Continue only if at least one release is found\n if len(torrents) < 2 or html.find(text='No Torrents Found!'):\n log.debug('Data returned from provider does not contain any torrents')\n return items\n\n # Skip column headers\n for row in torrents[1:]:\n try:\n table_data = row('td')\n title = table_data[1].find('a').text\n download_url = self.urls['base_url'] + table_data[3].find('a')['href']\n if not all([title, download_url]):\n continue\n\n seeders = int(table_data[7].text)\n leechers = int(table_data[8].text)\n\n # Filter unseeded torrent\n if seeders < self.minseed:\n if mode != 'RSS':\n log.debug(\"Discarding torrent because it doesn't meet the\"\n ' minimum seeders: {0}. Seeders: {1}',\n title, seeders)\n continue\n\n torrent_size = table_data[5].text\n size = convert_size(torrent_size) or -1\n\n pubdate_raw = table_data[1].find('div').get_text().split('|')[-1].strip()\n pubdate = self.parse_pubdate(pubdate_raw, human_time=True)\n\n item = {\n 'title': title,\n 'link': download_url,\n 'size': size,\n 'seeders': seeders,\n 'leechers': leechers,\n 'pubdate': pubdate,\n }\n if mode != 'RSS':\n log.debug('Found result: {0} with {1} seeders and {2} leechers',\n title, seeders, leechers)\n\n items.append(item)\n except (AttributeError, TypeError, KeyError, ValueError, IndexError):\n log.exception('Failed parsing provider.')\n\n return items\n\n def login(self):\n \"\"\"Login method used for logging in before doing search and torrent downloads.\"\"\"\n return self.cookie_login('sign in')\n\n\nprovider = IPTorrentsProvider()\n", "path": "medusa/providers/torrent/html/iptorrents.py"}]} | 2,472 | 120 |
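The one-line fix above only introduces a `custom_url` attribute, so users can point the provider at a mirror when the default `iptorrents.eu` domain is blocked or sitting behind Cloudflare. A hedged sketch of how such an override is typically consumed — illustrative only, not the provider's actual request code:

```python
# Illustrative only: prefer a user-configured mirror over the hard-coded URL.
from requests.compat import urljoin


def effective_search_url(provider, categories, freeleech, query):
    base = provider.custom_url or provider.url
    return urljoin(base, 't?{}{}&q={}&qf=#torrents'.format(categories, freeleech, query))
```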
gh_patches_debug_17170 | rasdani/github-patches | git_diff | qtile__qtile-2592 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
migrations need version annotations
We have cases where migrations are non-idempotent, so we should only run them when upgrading from a particular version to a different one.
My suggestion is to write a `qtile_version = "$whatever` into users' configs after they've done a migration, and then we can parse that and only run migrations newer than that.
We'll need to annotate the migrations we have somehow, but we can do that reasonably easily (a decorator, make them callable classes, whatever).
Thoughts?
</issue>
<code>
[start of libqtile/scripts/migrate.py]
1 # Copyright (c) 2021, Tycho Andersen. All rights reserved.
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a copy
4 # of this software and associated documentation files (the "Software"), to deal
5 # in the Software without restriction, including without limitation the rights
6 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 # copies of the Software, and to permit persons to whom the Software is
8 # furnished to do so, subject to the following conditions:
9 #
10 # The above copyright notice and this permission notice shall be included in
11 # all copies or substantial portions of the Software.
12 #
13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19 import filecmp
20 import os
21 import os.path
22 import shutil
23 import sys
24 from glob import glob
25
26 BACKUP_SUFFIX = ".migrate.bak"
27
28 try:
29 import bowler
30 except ImportError:
31 pass
32
33
34 def rename_hook(query, fro, to):
35 # could match on dotted_name< 'hook' '.' 'subscribe' '.' '{name}' >
36 # but the replacement gets more complicated...
37 selector = "'{name}'".format(name=fro)
38 q = query.select_pattern(selector)
39 q.current.kwargs["name"] = fro
40 return q.rename(to)
41
42
43 def client_name_updated(query):
44 """ Rename window_name_change -> client_name_updated"""
45 return rename_hook(query, "window_name_change", "client_name_updated")
46
47
48 def tile_master_windows_rename(query):
49 return (
50 query
51 .select_function("Tile")
52 .modify_argument("masterWindows", "master_length")
53 )
54
55
56 def threaded_poll_text_rename(query):
57 return (
58 query
59 .select_class("ThreadedPollText")
60 .rename("ThreadPoolText")
61 )
62
63
64 def pacman_to_checkupdates(query):
65 return (
66 query
67 .select_class("Pacman")
68 .rename("CheckUpdates")
69 )
70
71
72 def hook_main_function(query):
73 def modify_main(node, capture, filename):
74 main = capture.get("function_def")
75 if main.prev_sibling:
76 for leaf in main.prev_sibling.leaves():
77 if "startup" == leaf.value:
78 return
79 args = capture.get("function_arguments")
80 if args:
81 args[0].remove()
82 main.prefix += "from libqtile import hook, qtile\n"
83 main.prefix += "@hook.subscribe.startup\n"
84
85 return (
86 query
87 .select_function("main")
88 .is_def()
89 .modify(modify_main)
90 )
91
92
93 # Deprecated new_at_current key replaced by new_client_position.
94 # In the node, we want to change the key name
95 # and adapts its value depending of the previous value :
96 # new_at_current=True => new_client_position=before_current
97 # new_at_current<>True => new_client_position=after_current
98 def update_node_nac(node, capture, filename):
99 key = capture.get("k")
100 key.value = "new_client_position"
101 val = capture.get("v")
102 if val.value == "True":
103 val.value = "'before_current'"
104 else:
105 val.value = "'after_current'"
106
107
108 def new_at_current_to_new_client_position(query):
109 old_pattern = """
110 argument< k="new_at_current" "=" v=any >
111 """
112 return (
113 query
114 .select(old_pattern)
115 .modify(update_node_nac)
116 )
117
118
119 MIGRATIONS = [
120 client_name_updated,
121 tile_master_windows_rename,
122 threaded_poll_text_rename,
123 pacman_to_checkupdates,
124 hook_main_function,
125 new_at_current_to_new_client_position,
126 ]
127
128
129 MODULE_RENAMES = [
130 ("libqtile.command_graph", "libqtile.command.graph"),
131 ("libqtile.command_client", "libqtile.command.client"),
132 ("libqtile.command_interface", "libqtile.command.interface"),
133 ("libqtile.command_object", "libqtile.command.base"),
134 ("libqtile.window", "libqtile.backend.x11.window"),
135 ]
136
137 for (fro, to) in MODULE_RENAMES:
138 def f(query, fro=fro, to=to):
139 return (
140 query
141 .select_module(fro)
142 .rename(to)
143 )
144 MIGRATIONS.append(f)
145
146
147 def file_and_backup(config_dir):
148 for py in glob(os.path.join(config_dir, "*.py")):
149 backup = py + BACKUP_SUFFIX
150 yield py, backup
151
152
153 def do_migrate(args):
154 if "bowler" not in sys.modules:
155 print("bowler can't be found, not migrating config file")
156 print("install it and try again")
157 sys.exit(1)
158
159 config_dir = os.path.dirname(args.config)
160 for py, backup in file_and_backup(config_dir):
161 shutil.copyfile(py, backup)
162
163 for m in MIGRATIONS:
164 q = bowler.Query(config_dir)
165 m(q).execute(interactive=args.interactive, write=True)
166
167 changed = False
168 for py, backup in file_and_backup(config_dir):
169 backup = py + BACKUP_SUFFIX
170 if not filecmp.cmp(py, backup, shallow=False):
171 changed = True
172 break
173
174 if not changed:
175 print("Config unchanged.")
176 for _, backup in file_and_backup(config_dir):
177 os.remove(backup)
178
179
180 def add_subcommand(subparsers, parents):
181 parser = subparsers.add_parser(
182 "migrate",
183 parents=parents,
184 help="Migrate a configuration file to the current API"
185 )
186 parser.add_argument(
187 "-c",
188 "--config",
189 action="store",
190 default=os.path.expanduser(
191 os.path.join(os.getenv("XDG_CONFIG_HOME", "~/.config"), "qtile", "config.py")
192 ),
193 help="Use the specified configuration file (migrates every .py file in this directory)",
194 )
195 parser.add_argument(
196 "--interactive",
197 action="store_true",
198 help="Interactively apply diff (similar to git add -p)",
199 )
200 parser.set_defaults(func=do_migrate)
201
[end of libqtile/scripts/migrate.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/libqtile/scripts/migrate.py b/libqtile/scripts/migrate.py
--- a/libqtile/scripts/migrate.py
+++ b/libqtile/scripts/migrate.py
@@ -162,7 +162,7 @@
for m in MIGRATIONS:
q = bowler.Query(config_dir)
- m(q).execute(interactive=args.interactive, write=True)
+ m(q).execute(interactive=not args.yes, write=True)
changed = False
for py, backup in file_and_backup(config_dir):
@@ -193,8 +193,8 @@
help="Use the specified configuration file (migrates every .py file in this directory)",
)
parser.add_argument(
- "--interactive",
+ "--yes",
action="store_true",
- help="Interactively apply diff (similar to git add -p)",
+ help="Automatically apply diffs with no confirmation",
)
parser.set_defaults(func=do_migrate)
| {"golden_diff": "diff --git a/libqtile/scripts/migrate.py b/libqtile/scripts/migrate.py\n--- a/libqtile/scripts/migrate.py\n+++ b/libqtile/scripts/migrate.py\n@@ -162,7 +162,7 @@\n \n for m in MIGRATIONS:\n q = bowler.Query(config_dir)\n- m(q).execute(interactive=args.interactive, write=True)\n+ m(q).execute(interactive=not args.yes, write=True)\n \n changed = False\n for py, backup in file_and_backup(config_dir):\n@@ -193,8 +193,8 @@\n help=\"Use the specified configuration file (migrates every .py file in this directory)\",\n )\n parser.add_argument(\n- \"--interactive\",\n+ \"--yes\",\n action=\"store_true\",\n- help=\"Interactively apply diff (similar to git add -p)\",\n+ help=\"Automatically apply diffs with no confirmation\",\n )\n parser.set_defaults(func=do_migrate)\n", "issue": "migrations need version annotations\nWe have cases where migrations are non-idempotent, so we should only run them when upgrading from a particular version to a different one.\r\n\r\nMy suggestion is to write a `qtile_version = \"$whatever` into users' configs after they've done a migration, and then we can parse that and only run migrations newer than that.\r\n\r\nWe'll need to annotate the migrations we have somehow, but we can do that reasonably easily (a decorator, make them callable classes, whatever).\r\n\r\nThoughts?\n", "before_files": [{"content": "# Copyright (c) 2021, Tycho Andersen. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nimport filecmp\nimport os\nimport os.path\nimport shutil\nimport sys\nfrom glob import glob\n\nBACKUP_SUFFIX = \".migrate.bak\"\n\ntry:\n import bowler\nexcept ImportError:\n pass\n\n\ndef rename_hook(query, fro, to):\n # could match on dotted_name< 'hook' '.' 'subscribe' '.' 
'{name}' >\n # but the replacement gets more complicated...\n selector = \"'{name}'\".format(name=fro)\n q = query.select_pattern(selector)\n q.current.kwargs[\"name\"] = fro\n return q.rename(to)\n\n\ndef client_name_updated(query):\n \"\"\" Rename window_name_change -> client_name_updated\"\"\"\n return rename_hook(query, \"window_name_change\", \"client_name_updated\")\n\n\ndef tile_master_windows_rename(query):\n return (\n query\n .select_function(\"Tile\")\n .modify_argument(\"masterWindows\", \"master_length\")\n )\n\n\ndef threaded_poll_text_rename(query):\n return (\n query\n .select_class(\"ThreadedPollText\")\n .rename(\"ThreadPoolText\")\n )\n\n\ndef pacman_to_checkupdates(query):\n return (\n query\n .select_class(\"Pacman\")\n .rename(\"CheckUpdates\")\n )\n\n\ndef hook_main_function(query):\n def modify_main(node, capture, filename):\n main = capture.get(\"function_def\")\n if main.prev_sibling:\n for leaf in main.prev_sibling.leaves():\n if \"startup\" == leaf.value:\n return\n args = capture.get(\"function_arguments\")\n if args:\n args[0].remove()\n main.prefix += \"from libqtile import hook, qtile\\n\"\n main.prefix += \"@hook.subscribe.startup\\n\"\n\n return (\n query\n .select_function(\"main\")\n .is_def()\n .modify(modify_main)\n )\n\n\n# Deprecated new_at_current key replaced by new_client_position.\n# In the node, we want to change the key name\n# and adapts its value depending of the previous value :\n# new_at_current=True => new_client_position=before_current\n# new_at_current<>True => new_client_position=after_current\ndef update_node_nac(node, capture, filename):\n key = capture.get(\"k\")\n key.value = \"new_client_position\"\n val = capture.get(\"v\")\n if val.value == \"True\":\n val.value = \"'before_current'\"\n else:\n val.value = \"'after_current'\"\n\n\ndef new_at_current_to_new_client_position(query):\n old_pattern = \"\"\"\n argument< k=\"new_at_current\" \"=\" v=any >\n \"\"\"\n return (\n query\n .select(old_pattern)\n .modify(update_node_nac)\n )\n\n\nMIGRATIONS = [\n client_name_updated,\n tile_master_windows_rename,\n threaded_poll_text_rename,\n pacman_to_checkupdates,\n hook_main_function,\n new_at_current_to_new_client_position,\n]\n\n\nMODULE_RENAMES = [\n (\"libqtile.command_graph\", \"libqtile.command.graph\"),\n (\"libqtile.command_client\", \"libqtile.command.client\"),\n (\"libqtile.command_interface\", \"libqtile.command.interface\"),\n (\"libqtile.command_object\", \"libqtile.command.base\"),\n (\"libqtile.window\", \"libqtile.backend.x11.window\"),\n]\n\nfor (fro, to) in MODULE_RENAMES:\n def f(query, fro=fro, to=to):\n return (\n query\n .select_module(fro)\n .rename(to)\n )\n MIGRATIONS.append(f)\n\n\ndef file_and_backup(config_dir):\n for py in glob(os.path.join(config_dir, \"*.py\")):\n backup = py + BACKUP_SUFFIX\n yield py, backup\n\n\ndef do_migrate(args):\n if \"bowler\" not in sys.modules:\n print(\"bowler can't be found, not migrating config file\")\n print(\"install it and try again\")\n sys.exit(1)\n\n config_dir = os.path.dirname(args.config)\n for py, backup in file_and_backup(config_dir):\n shutil.copyfile(py, backup)\n\n for m in MIGRATIONS:\n q = bowler.Query(config_dir)\n m(q).execute(interactive=args.interactive, write=True)\n\n changed = False\n for py, backup in file_and_backup(config_dir):\n backup = py + BACKUP_SUFFIX\n if not filecmp.cmp(py, backup, shallow=False):\n changed = True\n break\n\n if not changed:\n print(\"Config unchanged.\")\n for _, backup in file_and_backup(config_dir):\n 
os.remove(backup)\n\n\ndef add_subcommand(subparsers, parents):\n parser = subparsers.add_parser(\n \"migrate\",\n parents=parents,\n help=\"Migrate a configuration file to the current API\"\n )\n parser.add_argument(\n \"-c\",\n \"--config\",\n action=\"store\",\n default=os.path.expanduser(\n os.path.join(os.getenv(\"XDG_CONFIG_HOME\", \"~/.config\"), \"qtile\", \"config.py\")\n ),\n help=\"Use the specified configuration file (migrates every .py file in this directory)\",\n )\n parser.add_argument(\n \"--interactive\",\n action=\"store_true\",\n help=\"Interactively apply diff (similar to git add -p)\",\n )\n parser.set_defaults(func=do_migrate)\n", "path": "libqtile/scripts/migrate.py"}]} | 2,529 | 220 |
gh_patches_debug_19394 | rasdani/github-patches | git_diff | hylang__hy-1309 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Hy Ignores PYTHONDONTWRITEBYTECODE
When `PYTHONDONTWRITEBYTECODE` is set in the environment (for example via `export PYTHONDONTWRITEBYTECODE=1`), Hy should not generate `.pyc` files.
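For context, CPython itself exposes this setting both through the environment variable and through `sys.dont_write_bytecode`. A minimal sketch of a guard that respects it (illustrative only; `bytecode_writing_disabled` is a hypothetical helper, not part of Hy) might look like:

```python
import os
import sys


def bytecode_writing_disabled():
    # CPython sets sys.dont_write_bytecode when -B or a non-empty
    # PYTHONDONTWRITEBYTECODE is used; checking the environment variable as
    # well covers callers that only set the variable.
    return sys.dont_write_bytecode or bool(os.environ.get("PYTHONDONTWRITEBYTECODE"))


# A byte-compilation step such as write_code_as_pyc(fpath, code) would then
# only run when bytecode_writing_disabled() returns False.
```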
</issue>
<code>
[start of hy/importer.py]
1 # Copyright 2017 the authors.
2 # This file is part of Hy, which is free software licensed under the Expat
3 # license. See the LICENSE.
4
5 from hy.compiler import hy_compile, HyTypeError
6 from hy.models import HyObject, replace_hy_obj
7 from hy.lex import tokenize, LexException
8 from hy.errors import HyIOError
9
10 from io import open
11 import re
12 import marshal
13 import struct
14 import imp
15 import sys
16 import ast
17 import os
18 import __future__
19
20 from hy._compat import PY3, PY34, MAGIC, builtins, long_type, wr_long
21 from hy._compat import string_types
22
23
24 def ast_compile(ast, filename, mode):
25 """Compile AST.
26 Like Python's compile, but with some special flags."""
27 flags = (__future__.CO_FUTURE_DIVISION |
28 __future__.CO_FUTURE_PRINT_FUNCTION)
29 return compile(ast, filename, mode, flags)
30
31
32 def import_buffer_to_hst(buf):
33 """Import content from buf and return a Hy AST."""
34 return tokenize(buf + "\n")
35
36
37 def import_file_to_hst(fpath):
38 """Import content from fpath and return a Hy AST."""
39 try:
40 with open(fpath, 'r', encoding='utf-8') as f:
41 return import_buffer_to_hst(f.read())
42 except IOError as e:
43 raise HyIOError(e.errno, e.strerror, e.filename)
44
45
46 def import_buffer_to_ast(buf, module_name):
47 """ Import content from buf and return a Python AST."""
48 return hy_compile(import_buffer_to_hst(buf), module_name)
49
50
51 def import_file_to_ast(fpath, module_name):
52 """Import content from fpath and return a Python AST."""
53 return hy_compile(import_file_to_hst(fpath), module_name)
54
55
56 def import_file_to_module(module_name, fpath, loader=None):
57 """Import Hy source from fpath and put it into a Python module.
58
59 If there's an up-to-date byte-compiled version of this module, load that
60 instead. Otherwise, byte-compile the module once we're done loading it, if
61 we can.
62
63 Return the module."""
64
65 module = None
66
67 bytecode_path = get_bytecode_path(fpath)
68 try:
69 source_mtime = int(os.stat(fpath).st_mtime)
70 with open(bytecode_path, 'rb') as bc_f:
71 # The first 4 bytes are the magic number for the version of Python
72 # that compiled this bytecode.
73 bytecode_magic = bc_f.read(4)
74 # The next 4 bytes, interpreted as a little-endian 32-bit integer,
75 # are the mtime of the corresponding source file.
76 bytecode_mtime, = struct.unpack('<i', bc_f.read(4))
77 except (IOError, OSError):
78 pass
79 else:
80 if bytecode_magic == MAGIC and bytecode_mtime >= source_mtime:
81 # It's a cache hit. Load the byte-compiled version.
82 if PY3:
83 # As of Python 3.6, imp.load_compiled still exists, but it's
84 # deprecated. So let's use SourcelessFileLoader instead.
85 from importlib.machinery import SourcelessFileLoader
86 module = (SourcelessFileLoader(module_name, bytecode_path).
87 load_module(module_name))
88 else:
89 module = imp.load_compiled(module_name, bytecode_path)
90
91 if not module:
92 # It's a cache miss, so load from source.
93 sys.modules[module_name] = None
94 try:
95 _ast = import_file_to_ast(fpath, module_name)
96 module = imp.new_module(module_name)
97 module.__file__ = fpath
98 code = ast_compile(_ast, fpath, "exec")
99 try:
100 write_code_as_pyc(fpath, code)
101 except (IOError, OSError):
102 # We failed to save the bytecode, probably because of a
103 # permissions issue. The user only asked to import the
104 # file, so don't bug them about it.
105 pass
106 eval(code, module.__dict__)
107 except (HyTypeError, LexException) as e:
108 if e.source is None:
109 with open(fpath, 'rt') as fp:
110 e.source = fp.read()
111 e.filename = fpath
112 raise
113 except Exception:
114 sys.modules.pop(module_name, None)
115 raise
116 sys.modules[module_name] = module
117 module.__name__ = module_name
118
119 module.__file__ = fpath
120 if loader:
121 module.__loader__ = loader
122 if is_package(module_name):
123 module.__path__ = []
124 module.__package__ = module_name
125 else:
126 module.__package__ = module_name.rpartition('.')[0]
127
128 return module
129
130
131 def import_buffer_to_module(module_name, buf):
132 try:
133 _ast = import_buffer_to_ast(buf, module_name)
134 mod = imp.new_module(module_name)
135 eval(ast_compile(_ast, "", "exec"), mod.__dict__)
136 except (HyTypeError, LexException) as e:
137 if e.source is None:
138 e.source = buf
139 e.filename = '<stdin>'
140 raise
141 return mod
142
143
144 def hy_eval(hytree, namespace, module_name, ast_callback=None):
145 foo = HyObject()
146 foo.start_line = 0
147 foo.end_line = 0
148 foo.start_column = 0
149 foo.end_column = 0
150 replace_hy_obj(hytree, foo)
151
152 if not isinstance(module_name, string_types):
153 raise HyTypeError(foo, "Module name must be a string")
154
155 _ast, expr = hy_compile(hytree, module_name, get_expr=True)
156
157 # Spoof the positions in the generated ast...
158 for node in ast.walk(_ast):
159 node.lineno = 1
160 node.col_offset = 1
161
162 for node in ast.walk(expr):
163 node.lineno = 1
164 node.col_offset = 1
165
166 if ast_callback:
167 ast_callback(_ast, expr)
168
169 if not isinstance(namespace, dict):
170 raise HyTypeError(foo, "Globals must be a dictionary")
171
172 # Two-step eval: eval() the body of the exec call
173 eval(ast_compile(_ast, "<eval_body>", "exec"), namespace)
174
175 # Then eval the expression context and return that
176 return eval(ast_compile(expr, "<eval>", "eval"), namespace)
177
178
179 def write_hy_as_pyc(fname):
180 _ast = import_file_to_ast(fname,
181 os.path.basename(os.path.splitext(fname)[0]))
182 code = ast_compile(_ast, fname, "exec")
183 write_code_as_pyc(fname, code)
184
185
186 def write_code_as_pyc(fname, code):
187 st = os.stat(fname)
188 timestamp = long_type(st.st_mtime)
189
190 cfile = get_bytecode_path(fname)
191 try:
192 os.makedirs(os.path.dirname(cfile))
193 except (IOError, OSError):
194 pass
195
196 with builtins.open(cfile, 'wb') as fc:
197 fc.write(MAGIC)
198 wr_long(fc, timestamp)
199 if PY3:
200 wr_long(fc, st.st_size)
201 marshal.dump(code, fc)
202
203
204 class MetaLoader(object):
205 def __init__(self, path):
206 self.path = path
207
208 def load_module(self, fullname):
209 if fullname in sys.modules:
210 return sys.modules[fullname]
211
212 if not self.path:
213 return
214
215 return import_file_to_module(fullname, self.path, self)
216
217
218 class MetaImporter(object):
219 def find_on_path(self, fullname):
220 fls = ["%s/__init__.hy", "%s.hy"]
221 dirpath = "/".join(fullname.split("."))
222
223 for pth in sys.path:
224 pth = os.path.abspath(pth)
225 for fp in fls:
226 composed_path = fp % ("%s/%s" % (pth, dirpath))
227 if os.path.exists(composed_path):
228 return composed_path
229
230 def find_module(self, fullname, path=None):
231 path = self.find_on_path(fullname)
232 if path:
233 return MetaLoader(path)
234
235
236 sys.meta_path.insert(0, MetaImporter())
237 sys.path.insert(0, "")
238
239
240 def is_package(module_name):
241 mpath = os.path.join(*module_name.split("."))
242 for path in map(os.path.abspath, sys.path):
243 if os.path.exists(os.path.join(path, mpath, "__init__.hy")):
244 return True
245 return False
246
247
248 def get_bytecode_path(source_path):
249 if PY34:
250 import importlib.util
251 return importlib.util.cache_from_source(source_path)
252 elif hasattr(imp, "cache_from_source"):
253 return imp.cache_from_source(source_path)
254 else:
255 # If source_path has a file extension, replace it with ".pyc".
256 # Otherwise, just append ".pyc".
257 d, f = os.path.split(source_path)
258 return os.path.join(d, re.sub(r"(?:\.[^.]+)?\Z", ".pyc", f))
259
[end of hy/importer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/hy/importer.py b/hy/importer.py
--- a/hy/importer.py
+++ b/hy/importer.py
@@ -96,13 +96,14 @@
module = imp.new_module(module_name)
module.__file__ = fpath
code = ast_compile(_ast, fpath, "exec")
- try:
- write_code_as_pyc(fpath, code)
- except (IOError, OSError):
- # We failed to save the bytecode, probably because of a
- # permissions issue. The user only asked to import the
- # file, so don't bug them about it.
- pass
+ if not os.environ.get('PYTHONDONTWRITEBYTECODE'):
+ try:
+ write_code_as_pyc(fpath, code)
+ except (IOError, OSError):
+ # We failed to save the bytecode, probably because of a
+ # permissions issue. The user only asked to import the
+ # file, so don't bug them about it.
+ pass
eval(code, module.__dict__)
except (HyTypeError, LexException) as e:
if e.source is None:
| {"golden_diff": "diff --git a/hy/importer.py b/hy/importer.py\n--- a/hy/importer.py\n+++ b/hy/importer.py\n@@ -96,13 +96,14 @@\n module = imp.new_module(module_name)\n module.__file__ = fpath\n code = ast_compile(_ast, fpath, \"exec\")\n- try:\n- write_code_as_pyc(fpath, code)\n- except (IOError, OSError):\n- # We failed to save the bytecode, probably because of a\n- # permissions issue. The user only asked to import the\n- # file, so don't bug them about it.\n- pass\n+ if not os.environ.get('PYTHONDONTWRITEBYTECODE'):\n+ try:\n+ write_code_as_pyc(fpath, code)\n+ except (IOError, OSError):\n+ # We failed to save the bytecode, probably because of a\n+ # permissions issue. The user only asked to import the\n+ # file, so don't bug them about it.\n+ pass\n eval(code, module.__dict__)\n except (HyTypeError, LexException) as e:\n if e.source is None:\n", "issue": "Hy Ignores PYTHONDONTWRITEBYTECODE\nWhen `PYTHONDONTWRITEBYTECODE` is set in the environment (for example using `export PYTHONDONTWRITEBYTECODE=1`) Hy should not generate `.pyc`.\n", "before_files": [{"content": "# Copyright 2017 the authors.\n# This file is part of Hy, which is free software licensed under the Expat\n# license. See the LICENSE.\n\nfrom hy.compiler import hy_compile, HyTypeError\nfrom hy.models import HyObject, replace_hy_obj\nfrom hy.lex import tokenize, LexException\nfrom hy.errors import HyIOError\n\nfrom io import open\nimport re\nimport marshal\nimport struct\nimport imp\nimport sys\nimport ast\nimport os\nimport __future__\n\nfrom hy._compat import PY3, PY34, MAGIC, builtins, long_type, wr_long\nfrom hy._compat import string_types\n\n\ndef ast_compile(ast, filename, mode):\n \"\"\"Compile AST.\n Like Python's compile, but with some special flags.\"\"\"\n flags = (__future__.CO_FUTURE_DIVISION |\n __future__.CO_FUTURE_PRINT_FUNCTION)\n return compile(ast, filename, mode, flags)\n\n\ndef import_buffer_to_hst(buf):\n \"\"\"Import content from buf and return a Hy AST.\"\"\"\n return tokenize(buf + \"\\n\")\n\n\ndef import_file_to_hst(fpath):\n \"\"\"Import content from fpath and return a Hy AST.\"\"\"\n try:\n with open(fpath, 'r', encoding='utf-8') as f:\n return import_buffer_to_hst(f.read())\n except IOError as e:\n raise HyIOError(e.errno, e.strerror, e.filename)\n\n\ndef import_buffer_to_ast(buf, module_name):\n \"\"\" Import content from buf and return a Python AST.\"\"\"\n return hy_compile(import_buffer_to_hst(buf), module_name)\n\n\ndef import_file_to_ast(fpath, module_name):\n \"\"\"Import content from fpath and return a Python AST.\"\"\"\n return hy_compile(import_file_to_hst(fpath), module_name)\n\n\ndef import_file_to_module(module_name, fpath, loader=None):\n \"\"\"Import Hy source from fpath and put it into a Python module.\n\n If there's an up-to-date byte-compiled version of this module, load that\n instead. Otherwise, byte-compile the module once we're done loading it, if\n we can.\n\n Return the module.\"\"\"\n\n module = None\n\n bytecode_path = get_bytecode_path(fpath)\n try:\n source_mtime = int(os.stat(fpath).st_mtime)\n with open(bytecode_path, 'rb') as bc_f:\n # The first 4 bytes are the magic number for the version of Python\n # that compiled this bytecode.\n bytecode_magic = bc_f.read(4)\n # The next 4 bytes, interpreted as a little-endian 32-bit integer,\n # are the mtime of the corresponding source file.\n bytecode_mtime, = struct.unpack('<i', bc_f.read(4))\n except (IOError, OSError):\n pass\n else:\n if bytecode_magic == MAGIC and bytecode_mtime >= source_mtime:\n # It's a cache hit. 
Load the byte-compiled version.\n if PY3:\n # As of Python 3.6, imp.load_compiled still exists, but it's\n # deprecated. So let's use SourcelessFileLoader instead.\n from importlib.machinery import SourcelessFileLoader\n module = (SourcelessFileLoader(module_name, bytecode_path).\n load_module(module_name))\n else:\n module = imp.load_compiled(module_name, bytecode_path)\n\n if not module:\n # It's a cache miss, so load from source.\n sys.modules[module_name] = None\n try:\n _ast = import_file_to_ast(fpath, module_name)\n module = imp.new_module(module_name)\n module.__file__ = fpath\n code = ast_compile(_ast, fpath, \"exec\")\n try:\n write_code_as_pyc(fpath, code)\n except (IOError, OSError):\n # We failed to save the bytecode, probably because of a\n # permissions issue. The user only asked to import the\n # file, so don't bug them about it.\n pass\n eval(code, module.__dict__)\n except (HyTypeError, LexException) as e:\n if e.source is None:\n with open(fpath, 'rt') as fp:\n e.source = fp.read()\n e.filename = fpath\n raise\n except Exception:\n sys.modules.pop(module_name, None)\n raise\n sys.modules[module_name] = module\n module.__name__ = module_name\n\n module.__file__ = fpath\n if loader:\n module.__loader__ = loader\n if is_package(module_name):\n module.__path__ = []\n module.__package__ = module_name\n else:\n module.__package__ = module_name.rpartition('.')[0]\n\n return module\n\n\ndef import_buffer_to_module(module_name, buf):\n try:\n _ast = import_buffer_to_ast(buf, module_name)\n mod = imp.new_module(module_name)\n eval(ast_compile(_ast, \"\", \"exec\"), mod.__dict__)\n except (HyTypeError, LexException) as e:\n if e.source is None:\n e.source = buf\n e.filename = '<stdin>'\n raise\n return mod\n\n\ndef hy_eval(hytree, namespace, module_name, ast_callback=None):\n foo = HyObject()\n foo.start_line = 0\n foo.end_line = 0\n foo.start_column = 0\n foo.end_column = 0\n replace_hy_obj(hytree, foo)\n\n if not isinstance(module_name, string_types):\n raise HyTypeError(foo, \"Module name must be a string\")\n\n _ast, expr = hy_compile(hytree, module_name, get_expr=True)\n\n # Spoof the positions in the generated ast...\n for node in ast.walk(_ast):\n node.lineno = 1\n node.col_offset = 1\n\n for node in ast.walk(expr):\n node.lineno = 1\n node.col_offset = 1\n\n if ast_callback:\n ast_callback(_ast, expr)\n\n if not isinstance(namespace, dict):\n raise HyTypeError(foo, \"Globals must be a dictionary\")\n\n # Two-step eval: eval() the body of the exec call\n eval(ast_compile(_ast, \"<eval_body>\", \"exec\"), namespace)\n\n # Then eval the expression context and return that\n return eval(ast_compile(expr, \"<eval>\", \"eval\"), namespace)\n\n\ndef write_hy_as_pyc(fname):\n _ast = import_file_to_ast(fname,\n os.path.basename(os.path.splitext(fname)[0]))\n code = ast_compile(_ast, fname, \"exec\")\n write_code_as_pyc(fname, code)\n\n\ndef write_code_as_pyc(fname, code):\n st = os.stat(fname)\n timestamp = long_type(st.st_mtime)\n\n cfile = get_bytecode_path(fname)\n try:\n os.makedirs(os.path.dirname(cfile))\n except (IOError, OSError):\n pass\n\n with builtins.open(cfile, 'wb') as fc:\n fc.write(MAGIC)\n wr_long(fc, timestamp)\n if PY3:\n wr_long(fc, st.st_size)\n marshal.dump(code, fc)\n\n\nclass MetaLoader(object):\n def __init__(self, path):\n self.path = path\n\n def load_module(self, fullname):\n if fullname in sys.modules:\n return sys.modules[fullname]\n\n if not self.path:\n return\n\n return import_file_to_module(fullname, self.path, self)\n\n\nclass 
MetaImporter(object):\n def find_on_path(self, fullname):\n fls = [\"%s/__init__.hy\", \"%s.hy\"]\n dirpath = \"/\".join(fullname.split(\".\"))\n\n for pth in sys.path:\n pth = os.path.abspath(pth)\n for fp in fls:\n composed_path = fp % (\"%s/%s\" % (pth, dirpath))\n if os.path.exists(composed_path):\n return composed_path\n\n def find_module(self, fullname, path=None):\n path = self.find_on_path(fullname)\n if path:\n return MetaLoader(path)\n\n\nsys.meta_path.insert(0, MetaImporter())\nsys.path.insert(0, \"\")\n\n\ndef is_package(module_name):\n mpath = os.path.join(*module_name.split(\".\"))\n for path in map(os.path.abspath, sys.path):\n if os.path.exists(os.path.join(path, mpath, \"__init__.hy\")):\n return True\n return False\n\n\ndef get_bytecode_path(source_path):\n if PY34:\n import importlib.util\n return importlib.util.cache_from_source(source_path)\n elif hasattr(imp, \"cache_from_source\"):\n return imp.cache_from_source(source_path)\n else:\n # If source_path has a file extension, replace it with \".pyc\".\n # Otherwise, just append \".pyc\".\n d, f = os.path.split(source_path)\n return os.path.join(d, re.sub(r\"(?:\\.[^.]+)?\\Z\", \".pyc\", f))\n", "path": "hy/importer.py"}]} | 3,207 | 263 |
gh_patches_debug_26051 | rasdani/github-patches | git_diff | openstates__openstates-scrapers-2058 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
KY failing since at least 2018-01-04
KY has been failing since 2018-01-04
Based on automated runs it appears that KY has not run successfully in 2 days (2018-01-04).
```
00:00:39 CRITICAL pupa: Session(s) 2018 Regular Session were reported by Kentucky.get_session_list() but were not found in Kentucky.legislative_sessions or Kentucky.ignored_scraped_sessions.
no pupa_settings on path, using defaults
ky (scrape, import)
people: {}
bills: {}
```
Visit http://bobsled.openstates.org for more info.
</issue>
<code>
[start of openstates/ky/__init__.py]
1 import re
2
3 from pupa.scrape import Jurisdiction, Organization
4
5 from openstates.utils import url_xpath
6
7 from .people import KYPersonScraper
8 from .committees import KYCommitteeScraper
9 from .bills import KYBillScraper
10
11
12 class Kentucky(Jurisdiction):
13 division_id = "ocd-division/country:us/state:ky"
14 classification = "government"
15 name = "Kentucky"
16 url = "http://www.lrc.ky.gov/"
17 scrapers = {
18 'people': KYPersonScraper,
19 'committees': KYCommitteeScraper,
20 'bills': KYBillScraper,
21 }
22 parties = [
23 {'name': 'Republican'},
24 {'name': 'Democratic'}
25 ]
26 legislative_sessions = [
27 {
28 "_scraped_name": "2011 Regular Session",
29 "classification": "primary",
30 "end_date": "2011-03-09",
31 "identifier": "2011 Regular Session",
32 "name": "2011 Regular Session",
33 "start_date": "2011-01-04"
34 },
35 {
36 "_scraped_name": "2011 Extraordinary Session",
37 "classification": "special",
38 "end_date": "2011-04-06",
39 "identifier": "2011SS",
40 "name": "2011 Extraordinary Session",
41 "start_date": "2011-03-14"
42 },
43 {
44 "_scraped_name": "2012 Regular Session",
45 "classification": "primary",
46 "end_date": "2012-04-12",
47 "identifier": "2012RS",
48 "name": "2012 Regular Session",
49 "start_date": "2012-01-03"
50 },
51 {
52 "_scraped_name": "2012 Extraordinary Session",
53 "classification": "special",
54 "end_date": "2012-04-20",
55 "identifier": "2012SS",
56 "name": "2012 Extraordinary Session",
57 "start_date": "2012-04-16"
58 },
59 {
60 "_scraped_name": "2013 Regular Session",
61 "classification": "primary",
62 "end_date": "2013-03-26",
63 "identifier": "2013RS",
64 "name": "2013 Regular Session",
65 "start_date": "2013-01-08"
66 },
67 {
68 "_scraped_name": "2013 Extraordinary Session",
69 "classification": "special",
70 "end_date": "2013-08-19",
71 "identifier": "2013SS",
72 "name": "2013 Extraordinary Session",
73 "start_date": "2013-08-19"
74 },
75 {
76 "_scraped_name": "2014 Regular Session",
77 "classification": "primary",
78 "end_date": "2014-04-15",
79 "identifier": "2014RS",
80 "name": "2014 Regular Session",
81 "start_date": "2014-01-07"
82 },
83 {
84 "_scraped_name": "2015 Regular Session",
85 "classification": "primary",
86 "end_date": "2015-03-25",
87 "identifier": "2015RS",
88 "name": "2015 Regular Session",
89 "start_date": "2015-01-06"
90 },
91 {
92 "_scraped_name": "2016 Regular Session",
93 "classification": "primary",
94 "end_date": "2016-04-12",
95 "identifier": "2016RS",
96 "name": "2016 Regular Session",
97 "start_date": "2016-01-05"
98 },
99 {
100 "_scraped_name": "2017 Regular Session",
101 "classification": "primary",
102 "end_date": "2017-03-30",
103 "identifier": "2017RS",
104 "name": "2017 Regular Session",
105 "start_date": "2017-01-03"
106 }
107 ]
108 ignored_scraped_sessions = []
109
110 def get_organizations(self):
111 legislature_name = "Kentucky General Assembly"
112 lower_chamber_name = "House"
113 lower_seats = 100
114 lower_title = "Representative"
115 upper_chamber_name = "Senate"
116 upper_seats = 38
117 upper_title = "Senator"
118
119 legislature = Organization(name=legislature_name,
120 classification="legislature")
121 upper = Organization(upper_chamber_name, classification='upper',
122 parent_id=legislature._id)
123 lower = Organization(lower_chamber_name, classification='lower',
124 parent_id=legislature._id)
125
126 for n in range(1, upper_seats + 1):
127 upper.add_post(
128 label=str(n), role=upper_title,
129 division_id='{}/sldu:{}'.format(self.division_id, n))
130 for n in range(1, lower_seats + 1):
131 lower.add_post(
132 label=str(n), role=lower_title,
133 division_id='{}/sldl:{}'.format(self.division_id, n))
134
135 yield legislature
136 yield upper
137 yield lower
138
139 def get_session_list(self):
140 sessions = url_xpath(
141 'http://www.lrc.ky.gov/legislation.htm',
142 '//a[contains(@href, "record.htm")]/text()[normalize-space()]')
143
144 for index, session in enumerate(sessions):
145 # Remove escaped whitespace characters.
146 sessions[index] = re.sub(r'[\r\n\t]+', '', session)
147
148 return sessions
149
[end of openstates/ky/__init__.py]
[start of billy_metadata/ky.py]
1 import datetime
2
3 metadata = {
4 'name': 'Kentucky',
5 'abbreviation': 'ky',
6 'capitol_timezone': 'America/New_York',
7 'legislature_name': 'Kentucky General Assembly',
8 'legislature_url': 'http://www.lrc.ky.gov/',
9 'chambers': {
10 'upper': {'name': 'Senate', 'title': 'Senator'},
11 'lower': {'name': 'House', 'title': 'Representative'},
12 },
13 'terms': [
14 {
15 'name': '2011-2012',
16 'start_year': 2011,
17 'end_year': 2012,
18 'sessions': [
19 '2011 Regular Session', '2011SS', '2012RS', '2012SS'
20 ]
21 },
22 {
23 'name': '2013-2014',
24 'start_year': 2013,
25 'end_year': 2014,
26 'sessions': [
27 '2013RS', '2013SS', '2014RS',
28 ]
29 },
30 {
31 'name': '2015-2016',
32 'start_year': 2015,
33 'end_year': 2016,
34 'sessions': [
35 '2015RS', '2016RS',
36 ]
37 },
38 {
39 'name': '2017-2018',
40 'start_year': 2017,
41 'end_year': 2018,
42 'sessions': [
43 '2017RS',
44 ]
45 },
46 ],
47 'session_details': {
48 '2011 Regular Session': {
49 'type': 'primary',
50 'start_date': datetime.date(2011, 1, 4),
51 'end_date': datetime.date(2011, 3, 9),
52 'display_name': '2011 Regular Session',
53 '_scraped_name': '2011 Regular Session',
54 },
55 '2011SS': {
56 'type': 'special',
57 'start_date': datetime.date(2011, 3, 14),
58 'end_date': datetime.date(2011, 4, 6),
59 'display_name': '2011 Extraordinary Session',
60 '_scraped_name': '2011 Extraordinary Session',
61 },
62 '2012RS': {
63 'type': 'primary',
64 'start_date': datetime.date(2012, 1, 3),
65 'end_date': datetime.date(2012, 4, 12),
66 'display_name': '2012 Regular Session',
67 '_scraped_name': '2012 Regular Session',
68 },
69 '2012SS': {
70 'type': 'special',
71 'start_date': datetime.date(2012, 4, 16),
72 'end_date': datetime.date(2012, 4, 20),
73 'display_name': '2012 Extraordinary Session',
74 '_scraped_name': '2012 Extraordinary Session',
75 },
76 '2013RS': {
77 'type': 'primary',
78 'start_date': datetime.date(2013, 1, 8),
79 'end_date': datetime.date(2013, 3, 26),
80 'display_name': '2013 Regular Session',
81 '_scraped_name': '2013 Regular Session',
82 },
83 '2013SS': {
84 'type': 'special',
85 'start_date': datetime.date(2013, 8, 19),
86 'end_date': datetime.date(2013, 8, 19),
87 'display_name': '2013 Extraordinary Session',
88 '_scraped_name': '2013 Extraordinary Session',
89 },
90 '2014RS': {
91 'type': 'primary',
92 'start_date': datetime.date(2014, 1, 7),
93 'end_date': datetime.date(2014, 4, 15),
94 'display_name': '2014 Regular Session',
95 '_scraped_name': '2014 Regular Session',
96 },
97 '2015RS': {
98 'type': 'primary',
99 'start_date': datetime.date(2015, 1, 6),
100 'end_date': datetime.date(2015, 3, 25),
101 'display_name': '2015 Regular Session',
102 '_scraped_name': '2015 Regular Session',
103 },
104 '2016RS': {
105 'type': 'primary',
106 'start_date': datetime.date(2016, 1, 5),
107 'end_date': datetime.date(2016, 4, 12),
108 'display_name': '2016 Regular Session',
109 '_scraped_name': '2016 Regular Session',
110 },
111 '2017RS': {
112 'type': 'primary',
113 'start_date': datetime.date(2017, 1, 3),
114 'display_name': '2017 Regular Session',
115 '_scraped_name': '2017 Regular Session',
116 },
117 },
118 'feature_flags': ['subjects', 'events', 'influenceexplorer'],
119 '_ignored_scraped_sessions': [],
120 }
121
[end of billy_metadata/ky.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/billy_metadata/ky.py b/billy_metadata/ky.py
--- a/billy_metadata/ky.py
+++ b/billy_metadata/ky.py
@@ -41,6 +41,7 @@
'end_year': 2018,
'sessions': [
'2017RS',
+ '2018RS',
]
},
],
@@ -114,6 +115,12 @@
'display_name': '2017 Regular Session',
'_scraped_name': '2017 Regular Session',
},
+ '2018RS': {
+ 'type': 'primary',
+ 'start_date': datetime.date(2018, 1, 2),
+ 'display_name': '2018 Regular Session',
+ '_scraped_name': '2018 Regular Session',
+ },
},
'feature_flags': ['subjects', 'events', 'influenceexplorer'],
'_ignored_scraped_sessions': [],
diff --git a/openstates/ky/__init__.py b/openstates/ky/__init__.py
--- a/openstates/ky/__init__.py
+++ b/openstates/ky/__init__.py
@@ -103,7 +103,15 @@
"identifier": "2017RS",
"name": "2017 Regular Session",
"start_date": "2017-01-03"
- }
+ },
+ {
+ "_scraped_name": "2018 Regular Session",
+ "classification": "primary",
+ "end_date": "2018-04-13",
+ "identifier": "2018RS",
+ "name": "2018 Regular Session",
+ "start_date": "2018-01-02"
+ },
]
ignored_scraped_sessions = []
| {"golden_diff": "diff --git a/billy_metadata/ky.py b/billy_metadata/ky.py\n--- a/billy_metadata/ky.py\n+++ b/billy_metadata/ky.py\n@@ -41,6 +41,7 @@\n 'end_year': 2018,\n 'sessions': [\n '2017RS',\n+ '2018RS',\n ]\n },\n ],\n@@ -114,6 +115,12 @@\n 'display_name': '2017 Regular Session',\n '_scraped_name': '2017 Regular Session',\n },\n+ '2018RS': {\n+ 'type': 'primary',\n+ 'start_date': datetime.date(2018, 1, 2),\n+ 'display_name': '2018 Regular Session',\n+ '_scraped_name': '2018 Regular Session',\n+ },\n },\n 'feature_flags': ['subjects', 'events', 'influenceexplorer'],\n '_ignored_scraped_sessions': [],\ndiff --git a/openstates/ky/__init__.py b/openstates/ky/__init__.py\n--- a/openstates/ky/__init__.py\n+++ b/openstates/ky/__init__.py\n@@ -103,7 +103,15 @@\n \"identifier\": \"2017RS\",\n \"name\": \"2017 Regular Session\",\n \"start_date\": \"2017-01-03\"\n- }\n+ },\n+ {\n+ \"_scraped_name\": \"2018 Regular Session\",\n+ \"classification\": \"primary\",\n+ \"end_date\": \"2018-04-13\",\n+ \"identifier\": \"2018RS\",\n+ \"name\": \"2018 Regular Session\",\n+ \"start_date\": \"2018-01-02\"\n+ },\n ]\n ignored_scraped_sessions = []\n", "issue": "KY failing since at least 2018-01-04\nKY has been failing since 2018-01-04\n\nBased on automated runs it appears that KY has not run successfully in 2 days (2018-01-04).\n\n\n```\n 00:00:39 CRITICAL pupa: Session(s) 2018 Regular Session were reported by Kentucky.get_session_list() but were not found in Kentucky.legislative_sessions or Kentucky.ignored_scraped_sessions.\nno pupa_settings on path, using defaults\nky (scrape, import)\n people: {}\n bills: {}\n```\n\nVisit http://bobsled.openstates.org for more info.\n\n", "before_files": [{"content": "import re\n\nfrom pupa.scrape import Jurisdiction, Organization\n\nfrom openstates.utils import url_xpath\n\nfrom .people import KYPersonScraper\nfrom .committees import KYCommitteeScraper\nfrom .bills import KYBillScraper\n\n\nclass Kentucky(Jurisdiction):\n division_id = \"ocd-division/country:us/state:ky\"\n classification = \"government\"\n name = \"Kentucky\"\n url = \"http://www.lrc.ky.gov/\"\n scrapers = {\n 'people': KYPersonScraper,\n 'committees': KYCommitteeScraper,\n 'bills': KYBillScraper,\n }\n parties = [\n {'name': 'Republican'},\n {'name': 'Democratic'}\n ]\n legislative_sessions = [\n {\n \"_scraped_name\": \"2011 Regular Session\",\n \"classification\": \"primary\",\n \"end_date\": \"2011-03-09\",\n \"identifier\": \"2011 Regular Session\",\n \"name\": \"2011 Regular Session\",\n \"start_date\": \"2011-01-04\"\n },\n {\n \"_scraped_name\": \"2011 Extraordinary Session\",\n \"classification\": \"special\",\n \"end_date\": \"2011-04-06\",\n \"identifier\": \"2011SS\",\n \"name\": \"2011 Extraordinary Session\",\n \"start_date\": \"2011-03-14\"\n },\n {\n \"_scraped_name\": \"2012 Regular Session\",\n \"classification\": \"primary\",\n \"end_date\": \"2012-04-12\",\n \"identifier\": \"2012RS\",\n \"name\": \"2012 Regular Session\",\n \"start_date\": \"2012-01-03\"\n },\n {\n \"_scraped_name\": \"2012 Extraordinary Session\",\n \"classification\": \"special\",\n \"end_date\": \"2012-04-20\",\n \"identifier\": \"2012SS\",\n \"name\": \"2012 Extraordinary Session\",\n \"start_date\": \"2012-04-16\"\n },\n {\n \"_scraped_name\": \"2013 Regular Session\",\n \"classification\": \"primary\",\n \"end_date\": \"2013-03-26\",\n \"identifier\": \"2013RS\",\n \"name\": \"2013 Regular Session\",\n \"start_date\": \"2013-01-08\"\n },\n {\n \"_scraped_name\": \"2013 Extraordinary Session\",\n 
\"classification\": \"special\",\n \"end_date\": \"2013-08-19\",\n \"identifier\": \"2013SS\",\n \"name\": \"2013 Extraordinary Session\",\n \"start_date\": \"2013-08-19\"\n },\n {\n \"_scraped_name\": \"2014 Regular Session\",\n \"classification\": \"primary\",\n \"end_date\": \"2014-04-15\",\n \"identifier\": \"2014RS\",\n \"name\": \"2014 Regular Session\",\n \"start_date\": \"2014-01-07\"\n },\n {\n \"_scraped_name\": \"2015 Regular Session\",\n \"classification\": \"primary\",\n \"end_date\": \"2015-03-25\",\n \"identifier\": \"2015RS\",\n \"name\": \"2015 Regular Session\",\n \"start_date\": \"2015-01-06\"\n },\n {\n \"_scraped_name\": \"2016 Regular Session\",\n \"classification\": \"primary\",\n \"end_date\": \"2016-04-12\",\n \"identifier\": \"2016RS\",\n \"name\": \"2016 Regular Session\",\n \"start_date\": \"2016-01-05\"\n },\n {\n \"_scraped_name\": \"2017 Regular Session\",\n \"classification\": \"primary\",\n \"end_date\": \"2017-03-30\",\n \"identifier\": \"2017RS\",\n \"name\": \"2017 Regular Session\",\n \"start_date\": \"2017-01-03\"\n }\n ]\n ignored_scraped_sessions = []\n\n def get_organizations(self):\n legislature_name = \"Kentucky General Assembly\"\n lower_chamber_name = \"House\"\n lower_seats = 100\n lower_title = \"Representative\"\n upper_chamber_name = \"Senate\"\n upper_seats = 38\n upper_title = \"Senator\"\n\n legislature = Organization(name=legislature_name,\n classification=\"legislature\")\n upper = Organization(upper_chamber_name, classification='upper',\n parent_id=legislature._id)\n lower = Organization(lower_chamber_name, classification='lower',\n parent_id=legislature._id)\n\n for n in range(1, upper_seats + 1):\n upper.add_post(\n label=str(n), role=upper_title,\n division_id='{}/sldu:{}'.format(self.division_id, n))\n for n in range(1, lower_seats + 1):\n lower.add_post(\n label=str(n), role=lower_title,\n division_id='{}/sldl:{}'.format(self.division_id, n))\n\n yield legislature\n yield upper\n yield lower\n\n def get_session_list(self):\n sessions = url_xpath(\n 'http://www.lrc.ky.gov/legislation.htm',\n '//a[contains(@href, \"record.htm\")]/text()[normalize-space()]')\n\n for index, session in enumerate(sessions):\n # Remove escaped whitespace characters.\n sessions[index] = re.sub(r'[\\r\\n\\t]+', '', session)\n\n return sessions\n", "path": "openstates/ky/__init__.py"}, {"content": "import datetime\n\nmetadata = {\n 'name': 'Kentucky',\n 'abbreviation': 'ky',\n 'capitol_timezone': 'America/New_York',\n 'legislature_name': 'Kentucky General Assembly',\n 'legislature_url': 'http://www.lrc.ky.gov/',\n 'chambers': {\n 'upper': {'name': 'Senate', 'title': 'Senator'},\n 'lower': {'name': 'House', 'title': 'Representative'},\n },\n 'terms': [\n {\n 'name': '2011-2012',\n 'start_year': 2011,\n 'end_year': 2012,\n 'sessions': [\n '2011 Regular Session', '2011SS', '2012RS', '2012SS'\n ]\n },\n {\n 'name': '2013-2014',\n 'start_year': 2013,\n 'end_year': 2014,\n 'sessions': [\n '2013RS', '2013SS', '2014RS',\n ]\n },\n {\n 'name': '2015-2016',\n 'start_year': 2015,\n 'end_year': 2016,\n 'sessions': [\n '2015RS', '2016RS',\n ]\n },\n {\n 'name': '2017-2018',\n 'start_year': 2017,\n 'end_year': 2018,\n 'sessions': [\n '2017RS',\n ]\n },\n ],\n 'session_details': {\n '2011 Regular Session': {\n 'type': 'primary',\n 'start_date': datetime.date(2011, 1, 4),\n 'end_date': datetime.date(2011, 3, 9),\n 'display_name': '2011 Regular Session',\n '_scraped_name': '2011 Regular Session',\n },\n '2011SS': {\n 'type': 'special',\n 'start_date': datetime.date(2011, 3, 
14),\n 'end_date': datetime.date(2011, 4, 6),\n 'display_name': '2011 Extraordinary Session',\n '_scraped_name': '2011 Extraordinary Session',\n },\n '2012RS': {\n 'type': 'primary',\n 'start_date': datetime.date(2012, 1, 3),\n 'end_date': datetime.date(2012, 4, 12),\n 'display_name': '2012 Regular Session',\n '_scraped_name': '2012 Regular Session',\n },\n '2012SS': {\n 'type': 'special',\n 'start_date': datetime.date(2012, 4, 16),\n 'end_date': datetime.date(2012, 4, 20),\n 'display_name': '2012 Extraordinary Session',\n '_scraped_name': '2012 Extraordinary Session',\n },\n '2013RS': {\n 'type': 'primary',\n 'start_date': datetime.date(2013, 1, 8),\n 'end_date': datetime.date(2013, 3, 26),\n 'display_name': '2013 Regular Session',\n '_scraped_name': '2013 Regular Session',\n },\n '2013SS': {\n 'type': 'special',\n 'start_date': datetime.date(2013, 8, 19),\n 'end_date': datetime.date(2013, 8, 19),\n 'display_name': '2013 Extraordinary Session',\n '_scraped_name': '2013 Extraordinary Session',\n },\n '2014RS': {\n 'type': 'primary',\n 'start_date': datetime.date(2014, 1, 7),\n 'end_date': datetime.date(2014, 4, 15),\n 'display_name': '2014 Regular Session',\n '_scraped_name': '2014 Regular Session',\n },\n '2015RS': {\n 'type': 'primary',\n 'start_date': datetime.date(2015, 1, 6),\n 'end_date': datetime.date(2015, 3, 25),\n 'display_name': '2015 Regular Session',\n '_scraped_name': '2015 Regular Session',\n },\n '2016RS': {\n 'type': 'primary',\n 'start_date': datetime.date(2016, 1, 5),\n 'end_date': datetime.date(2016, 4, 12),\n 'display_name': '2016 Regular Session',\n '_scraped_name': '2016 Regular Session',\n },\n '2017RS': {\n 'type': 'primary',\n 'start_date': datetime.date(2017, 1, 3),\n 'display_name': '2017 Regular Session',\n '_scraped_name': '2017 Regular Session',\n },\n },\n 'feature_flags': ['subjects', 'events', 'influenceexplorer'],\n '_ignored_scraped_sessions': [],\n}\n", "path": "billy_metadata/ky.py"}]} | 3,915 | 437 |
gh_patches_debug_28981 | rasdani/github-patches | git_diff | jazzband__pip-tools-1050 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
piptools depcache is shared between implementations causing incorrect dependencies
<!-- Describe the issue briefly here. -->
#### Environment Versions
```console
$ uname -a
Linux asottile-MacBookPro 4.15.0-74-generic #84-Ubuntu SMP Thu Dec 19 08:06:28 UTC 2019 x86_64 x86_64 x86_64 GNU/Linux
$ lsb_release -a
No LSB modules are available.
Distributor ID: Ubuntu
Description: Ubuntu 18.04.3 LTS
Release: 18.04
Codename: bionic
$ /tmp/x/venv36/bin/pip freeze --all
Click==7.0
pip==20.0.2
pip-tools==4.4.0
setuptools==45.1.0
six==1.14.0
wheel==0.34.1
$ /tmp/x/venv36/bin/python --version --version
Python 3.6.9 (default, Nov 7 2019, 10:44:02)
[GCC 8.3.0]
$ /tmp/x/venvpp36/bin/python --version --version
Python 3.6.9 (1608da62bfc7, Dec 23 2019, 10:50:04)
[PyPy 7.3.0 with GCC 7.3.1 20180303 (Red Hat 7.3.1-5)]
```
#### Steps to replicate
```bash
set -euxo pipefail
cache="$PWD/cache"
export XDG_CACHE_HOME="$cache"
export CUSTOM_COMPILE_COMMAND='<<redacted>>'
rm -rf "$cache" venv36 venvpp36 reqs*.txt
virtualenv venv36 -ppython3.6 >& /dev/null
venv36/bin/pip install -qq pip-tools
virtualenv venvpp36 -ppypy3 >& /dev/null
venvpp36/bin/pip install -qq pip-tools
echo 'cffi' > requirements.in
venv36/bin/pip-compile --output-file reqs36_1.txt
venvpp36/bin/pip-compile --output-file reqspp36_1.txt
rm -rf "$cache"
venvpp36/bin/pip-compile --output-file reqspp36_2.txt
venv36/bin/pip-compile --output-file reqs36_2.txt
diff -u reqs36_1.txt reqs36_2.txt
diff -u reqspp36_1.txt reqspp36_2.txt
```
#### Expected result
I expect no diff to be produced and the script to exit 0
#### Actual result
```console
$ bash t.sh
+ cache=/tmp/x/cache
+ export XDG_CACHE_HOME=/tmp/x/cache
+ XDG_CACHE_HOME=/tmp/x/cache
+ export 'CUSTOM_COMPILE_COMMAND=<<redacted>>'
+ CUSTOM_COMPILE_COMMAND='<<redacted>>'
+ rm -rf /tmp/x/cache venv36 venvpp36 reqs36_1.txt reqs36_2.txt reqspp36_1.txt reqspp36_2.txt
+ virtualenv venv36 -ppython3.6
+ venv36/bin/pip install -qq pip-tools
+ virtualenv venvpp36 -ppypy3
+ venvpp36/bin/pip install -qq pip-tools
+ echo cffi
+ venv36/bin/pip-compile --output-file reqs36_1.txt
#
# This file is autogenerated by pip-compile
# To update, run:
#
# <<redacted>>
#
cffi==1.13.2
pycparser==2.19 # via cffi
+ venvpp36/bin/pip-compile --output-file reqspp36_1.txt
#
# This file is autogenerated by pip-compile
# To update, run:
#
# <<redacted>>
#
cffi==1.13.2
pycparser==2.19 # via cffi
+ rm -rf /tmp/x/cache
+ venvpp36/bin/pip-compile --output-file reqspp36_2.txt
#
# This file is autogenerated by pip-compile
# To update, run:
#
# <<redacted>>
#
cffi==1.13.2
+ venv36/bin/pip-compile --output-file reqs36_2.txt
#
# This file is autogenerated by pip-compile
# To update, run:
#
# <<redacted>>
#
cffi==1.13.2
+ diff -u reqs36_1.txt reqs36_2.txt
--- reqs36_1.txt 2020-01-30 08:46:36.913122945 -0800
+++ reqs36_2.txt 2020-01-30 08:46:41.696946374 -0800
@@ -5,4 +5,3 @@
# <<redacted>>
#
cffi==1.13.2
-pycparser==2.19 # via cffi
```
As you can see here, the PyPy 3.6 cache is poisoning the result of the CPython 3.6 pip-compile.
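One way to avoid this kind of collision (a sketch only; the helper name is illustrative and not pip-tools' actual implementation) is to make the cache filename depend on the interpreter implementation as well as the Python version:

```python
import platform
import sys


def cache_filename():
    # e.g. "depcache-cpython3.6.json" under CPython 3.6 but
    # "depcache-pypy3.6.json" under PyPy3.6, so the two interpreters no
    # longer share one dependency cache file.
    impl = platform.python_implementation().lower()
    return "depcache-{}{}.{}.json".format(impl, *sys.version_info[:2])
```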
</issue>
<code>
[start of piptools/cache.py]
1 # coding: utf-8
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 import json
5 import os
6 import sys
7
8 from pip._vendor.packaging.requirements import Requirement
9
10 from .exceptions import PipToolsError
11 from .utils import as_tuple, key_from_req, lookup_table
12
13
14 class CorruptCacheError(PipToolsError):
15 def __init__(self, path):
16 self.path = path
17
18 def __str__(self):
19 lines = [
20 "The dependency cache seems to have been corrupted.",
21 "Inspect, or delete, the following file:",
22 " {}".format(self.path),
23 ]
24 return os.linesep.join(lines)
25
26
27 def read_cache_file(cache_file_path):
28 with open(cache_file_path, "r") as cache_file:
29 try:
30 doc = json.load(cache_file)
31 except ValueError:
32 raise CorruptCacheError(cache_file_path)
33
34 # Check version and load the contents
35 if doc["__format__"] != 1:
36 raise AssertionError("Unknown cache file format")
37 return doc["dependencies"]
38
39
40 class DependencyCache(object):
41 """
42 Creates a new persistent dependency cache for the current Python version.
43 The cache file is written to the appropriate user cache dir for the
44 current platform, i.e.
45
46 ~/.cache/pip-tools/depcache-pyX.Y.json
47
48 Where X.Y indicates the Python version.
49 """
50
51 def __init__(self, cache_dir):
52 if not os.path.isdir(cache_dir):
53 os.makedirs(cache_dir)
54 py_version = ".".join(str(digit) for digit in sys.version_info[:2])
55 cache_filename = "depcache-py{}.json".format(py_version)
56
57 self._cache_file = os.path.join(cache_dir, cache_filename)
58 self._cache = None
59
60 @property
61 def cache(self):
62 """
63 The dictionary that is the actual in-memory cache. This property
64 lazily loads the cache from disk.
65 """
66 if self._cache is None:
67 self.read_cache()
68 return self._cache
69
70 def as_cache_key(self, ireq):
71 """
72 Given a requirement, return its cache key. This behavior is a little weird
73 in order to allow backwards compatibility with cache files. For a requirement
74 without extras, this will return, for example:
75
76 ("ipython", "2.1.0")
77
78 For a requirement with extras, the extras will be comma-separated and appended
79 to the version, inside brackets, like so:
80
81 ("ipython", "2.1.0[nbconvert,notebook]")
82 """
83 name, version, extras = as_tuple(ireq)
84 if not extras:
85 extras_string = ""
86 else:
87 extras_string = "[{}]".format(",".join(extras))
88 return name, "{}{}".format(version, extras_string)
89
90 def read_cache(self):
91 """Reads the cached contents into memory."""
92 if os.path.exists(self._cache_file):
93 self._cache = read_cache_file(self._cache_file)
94 else:
95 self._cache = {}
96
97 def write_cache(self):
98 """Writes the cache to disk as JSON."""
99 doc = {"__format__": 1, "dependencies": self._cache}
100 with open(self._cache_file, "w") as f:
101 json.dump(doc, f, sort_keys=True)
102
103 def clear(self):
104 self._cache = {}
105 self.write_cache()
106
107 def __contains__(self, ireq):
108 pkgname, pkgversion_and_extras = self.as_cache_key(ireq)
109 return pkgversion_and_extras in self.cache.get(pkgname, {})
110
111 def __getitem__(self, ireq):
112 pkgname, pkgversion_and_extras = self.as_cache_key(ireq)
113 return self.cache[pkgname][pkgversion_and_extras]
114
115 def __setitem__(self, ireq, values):
116 pkgname, pkgversion_and_extras = self.as_cache_key(ireq)
117 self.cache.setdefault(pkgname, {})
118 self.cache[pkgname][pkgversion_and_extras] = values
119 self.write_cache()
120
121 def reverse_dependencies(self, ireqs):
122 """
123 Returns a lookup table of reverse dependencies for all the given ireqs.
124
125 Since this is all static, it only works if the dependency cache
126 contains the complete data, otherwise you end up with a partial view.
127 This is typically no problem if you use this function after the entire
128 dependency tree is resolved.
129 """
130 ireqs_as_cache_values = [self.as_cache_key(ireq) for ireq in ireqs]
131 return self._reverse_dependencies(ireqs_as_cache_values)
132
133 def _reverse_dependencies(self, cache_keys):
134 """
135 Returns a lookup table of reverse dependencies for all the given cache keys.
136
137 Example input:
138
139 [('pep8', '1.5.7'),
140 ('flake8', '2.4.0'),
141 ('mccabe', '0.3'),
142 ('pyflakes', '0.8.1')]
143
144 Example output:
145
146 {'pep8': ['flake8'],
147 'flake8': [],
148 'mccabe': ['flake8'],
149 'pyflakes': ['flake8']}
150
151 """
152 # First, collect all the dependencies into a sequence of (parent, child)
153 # tuples, like [('flake8', 'pep8'), ('flake8', 'mccabe'), ...]
154 return lookup_table(
155 (key_from_req(Requirement(dep_name)), name)
156 for name, version_and_extras in cache_keys
157 for dep_name in self.cache[name][version_and_extras]
158 )
159
[end of piptools/cache.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/piptools/cache.py b/piptools/cache.py
--- a/piptools/cache.py
+++ b/piptools/cache.py
@@ -3,6 +3,7 @@
import json
import os
+import platform
import sys
from pip._vendor.packaging.requirements import Requirement
@@ -10,6 +11,17 @@
from .exceptions import PipToolsError
from .utils import as_tuple, key_from_req, lookup_table
+_PEP425_PY_TAGS = {"cpython": "cp", "pypy": "pp", "ironpython": "ip", "jython": "jy"}
+
+
+def _implementation_name():
+ """similar to PEP 425, however the minor version is separated from the
+ major to differentation "3.10" and "31.0".
+ """
+ implementation_name = platform.python_implementation().lower()
+ implementation = _PEP425_PY_TAGS.get(implementation_name, "??")
+ return "{}{}.{}".format(implementation, *sys.version_info)
+
class CorruptCacheError(PipToolsError):
def __init__(self, path):
@@ -45,14 +57,14 @@
~/.cache/pip-tools/depcache-pyX.Y.json
+ Where py indicates the Python implementation.
Where X.Y indicates the Python version.
"""
def __init__(self, cache_dir):
if not os.path.isdir(cache_dir):
os.makedirs(cache_dir)
- py_version = ".".join(str(digit) for digit in sys.version_info[:2])
- cache_filename = "depcache-py{}.json".format(py_version)
+ cache_filename = "depcache-{}.json".format(_implementation_name())
self._cache_file = os.path.join(cache_dir, cache_filename)
self._cache = None
| {"golden_diff": "diff --git a/piptools/cache.py b/piptools/cache.py\n--- a/piptools/cache.py\n+++ b/piptools/cache.py\n@@ -3,6 +3,7 @@\n \n import json\n import os\n+import platform\n import sys\n \n from pip._vendor.packaging.requirements import Requirement\n@@ -10,6 +11,17 @@\n from .exceptions import PipToolsError\n from .utils import as_tuple, key_from_req, lookup_table\n \n+_PEP425_PY_TAGS = {\"cpython\": \"cp\", \"pypy\": \"pp\", \"ironpython\": \"ip\", \"jython\": \"jy\"}\n+\n+\n+def _implementation_name():\n+ \"\"\"similar to PEP 425, however the minor version is separated from the\n+ major to differentation \"3.10\" and \"31.0\".\n+ \"\"\"\n+ implementation_name = platform.python_implementation().lower()\n+ implementation = _PEP425_PY_TAGS.get(implementation_name, \"??\")\n+ return \"{}{}.{}\".format(implementation, *sys.version_info)\n+\n \n class CorruptCacheError(PipToolsError):\n def __init__(self, path):\n@@ -45,14 +57,14 @@\n \n ~/.cache/pip-tools/depcache-pyX.Y.json\n \n+ Where py indicates the Python implementation.\n Where X.Y indicates the Python version.\n \"\"\"\n \n def __init__(self, cache_dir):\n if not os.path.isdir(cache_dir):\n os.makedirs(cache_dir)\n- py_version = \".\".join(str(digit) for digit in sys.version_info[:2])\n- cache_filename = \"depcache-py{}.json\".format(py_version)\n+ cache_filename = \"depcache-{}.json\".format(_implementation_name())\n \n self._cache_file = os.path.join(cache_dir, cache_filename)\n self._cache = None\n", "issue": "piptools depcache is shared between implementations causing incorrect dependencies\n<!-- Describe the issue briefly here. -->\r\n\r\n#### Environment Versions\r\n\r\n```console\r\n$ uname -a\r\nLinux asottile-MacBookPro 4.15.0-74-generic #84-Ubuntu SMP Thu Dec 19 08:06:28 UTC 2019 x86_64 x86_64 x86_64 GNU/Linux\r\n$ lsb_release -a\r\nNo LSB modules are available.\r\nDistributor ID:\tUbuntu\r\nDescription:\tUbuntu 18.04.3 LTS\r\nRelease:\t18.04\r\nCodename:\tbionic\r\n$ /tmp/x/venv36/bin/pip freeze --all\r\nClick==7.0\r\npip==20.0.2\r\npip-tools==4.4.0\r\nsetuptools==45.1.0\r\nsix==1.14.0\r\nwheel==0.34.1\r\n$ /tmp/x/venv36/bin/python --version --version\r\nPython 3.6.9 (default, Nov 7 2019, 10:44:02) \r\n[GCC 8.3.0]\r\n$ /tmp/x/venvpp36/bin/python --version --version\r\nPython 3.6.9 (1608da62bfc7, Dec 23 2019, 10:50:04)\r\n[PyPy 7.3.0 with GCC 7.3.1 20180303 (Red Hat 7.3.1-5)]\r\n```\r\n\r\n#### Steps to replicate\r\n\r\n```bash\r\nset -euxo pipefail\r\n\r\ncache=\"$PWD/cache\"\r\nexport XDG_CACHE_HOME=\"$cache\"\r\nexport CUSTOM_COMPILE_COMMAND='<<redacted>>'\r\n\r\nrm -rf \"$cache\" venv36 venvpp36 reqs*.txt\r\n\r\nvirtualenv venv36 -ppython3.6 >& /dev/null\r\nvenv36/bin/pip install -qq pip-tools\r\nvirtualenv venvpp36 -ppypy3 >& /dev/null\r\nvenvpp36/bin/pip install -qq pip-tools\r\n\r\necho 'cffi' > requirements.in\r\n\r\nvenv36/bin/pip-compile --output-file reqs36_1.txt\r\nvenvpp36/bin/pip-compile --output-file reqspp36_1.txt\r\n\r\nrm -rf \"$cache\"\r\nvenvpp36/bin/pip-compile --output-file reqspp36_2.txt\r\nvenv36/bin/pip-compile --output-file reqs36_2.txt\r\n\r\ndiff -u reqs36_1.txt reqs36_2.txt\r\ndiff -u reqspp36_1.txt reqspp36_2.txt\r\n```\r\n\r\n#### Expected result\r\n\r\nI expect no diff to be produced and the script to exit 0\r\n\r\n#### Actual result\r\n\r\n```console\r\n$ bash t.sh \r\n+ cache=/tmp/x/cache\r\n+ export XDG_CACHE_HOME=/tmp/x/cache\r\n+ XDG_CACHE_HOME=/tmp/x/cache\r\n+ export 'CUSTOM_COMPILE_COMMAND=<<redacted>>'\r\n+ CUSTOM_COMPILE_COMMAND='<<redacted>>'\r\n+ rm -rf /tmp/x/cache 
venv36 venvpp36 reqs36_1.txt reqs36_2.txt reqspp36_1.txt reqspp36_2.txt\r\n+ virtualenv venv36 -ppython3.6\r\n+ venv36/bin/pip install -qq pip-tools\r\n+ virtualenv venvpp36 -ppypy3\r\n+ venvpp36/bin/pip install -qq pip-tools\r\n+ echo cffi\r\n+ venv36/bin/pip-compile --output-file reqs36_1.txt\r\n#\r\n# This file is autogenerated by pip-compile\r\n# To update, run:\r\n#\r\n# <<redacted>>\r\n#\r\ncffi==1.13.2\r\npycparser==2.19 # via cffi\r\n+ venvpp36/bin/pip-compile --output-file reqspp36_1.txt\r\n#\r\n# This file is autogenerated by pip-compile\r\n# To update, run:\r\n#\r\n# <<redacted>>\r\n#\r\ncffi==1.13.2\r\npycparser==2.19 # via cffi\r\n+ rm -rf /tmp/x/cache\r\n+ venvpp36/bin/pip-compile --output-file reqspp36_2.txt\r\n#\r\n# This file is autogenerated by pip-compile\r\n# To update, run:\r\n#\r\n# <<redacted>>\r\n#\r\ncffi==1.13.2\r\n+ venv36/bin/pip-compile --output-file reqs36_2.txt\r\n#\r\n# This file is autogenerated by pip-compile\r\n# To update, run:\r\n#\r\n# <<redacted>>\r\n#\r\ncffi==1.13.2\r\n+ diff -u reqs36_1.txt reqs36_2.txt\r\n--- reqs36_1.txt\t2020-01-30 08:46:36.913122945 -0800\r\n+++ reqs36_2.txt\t2020-01-30 08:46:41.696946374 -0800\r\n@@ -5,4 +5,3 @@\r\n # <<redacted>>\r\n #\r\n cffi==1.13.2\r\n-pycparser==2.19 # via cffi\r\n```\r\n\r\n\r\nas you can see here, the pypy 3.6 cache is poisoning the result of the cpython 3.6 pip-compile\n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport json\nimport os\nimport sys\n\nfrom pip._vendor.packaging.requirements import Requirement\n\nfrom .exceptions import PipToolsError\nfrom .utils import as_tuple, key_from_req, lookup_table\n\n\nclass CorruptCacheError(PipToolsError):\n def __init__(self, path):\n self.path = path\n\n def __str__(self):\n lines = [\n \"The dependency cache seems to have been corrupted.\",\n \"Inspect, or delete, the following file:\",\n \" {}\".format(self.path),\n ]\n return os.linesep.join(lines)\n\n\ndef read_cache_file(cache_file_path):\n with open(cache_file_path, \"r\") as cache_file:\n try:\n doc = json.load(cache_file)\n except ValueError:\n raise CorruptCacheError(cache_file_path)\n\n # Check version and load the contents\n if doc[\"__format__\"] != 1:\n raise AssertionError(\"Unknown cache file format\")\n return doc[\"dependencies\"]\n\n\nclass DependencyCache(object):\n \"\"\"\n Creates a new persistent dependency cache for the current Python version.\n The cache file is written to the appropriate user cache dir for the\n current platform, i.e.\n\n ~/.cache/pip-tools/depcache-pyX.Y.json\n\n Where X.Y indicates the Python version.\n \"\"\"\n\n def __init__(self, cache_dir):\n if not os.path.isdir(cache_dir):\n os.makedirs(cache_dir)\n py_version = \".\".join(str(digit) for digit in sys.version_info[:2])\n cache_filename = \"depcache-py{}.json\".format(py_version)\n\n self._cache_file = os.path.join(cache_dir, cache_filename)\n self._cache = None\n\n @property\n def cache(self):\n \"\"\"\n The dictionary that is the actual in-memory cache. This property\n lazily loads the cache from disk.\n \"\"\"\n if self._cache is None:\n self.read_cache()\n return self._cache\n\n def as_cache_key(self, ireq):\n \"\"\"\n Given a requirement, return its cache key. This behavior is a little weird\n in order to allow backwards compatibility with cache files. 
For a requirement\n without extras, this will return, for example:\n\n (\"ipython\", \"2.1.0\")\n\n For a requirement with extras, the extras will be comma-separated and appended\n to the version, inside brackets, like so:\n\n (\"ipython\", \"2.1.0[nbconvert,notebook]\")\n \"\"\"\n name, version, extras = as_tuple(ireq)\n if not extras:\n extras_string = \"\"\n else:\n extras_string = \"[{}]\".format(\",\".join(extras))\n return name, \"{}{}\".format(version, extras_string)\n\n def read_cache(self):\n \"\"\"Reads the cached contents into memory.\"\"\"\n if os.path.exists(self._cache_file):\n self._cache = read_cache_file(self._cache_file)\n else:\n self._cache = {}\n\n def write_cache(self):\n \"\"\"Writes the cache to disk as JSON.\"\"\"\n doc = {\"__format__\": 1, \"dependencies\": self._cache}\n with open(self._cache_file, \"w\") as f:\n json.dump(doc, f, sort_keys=True)\n\n def clear(self):\n self._cache = {}\n self.write_cache()\n\n def __contains__(self, ireq):\n pkgname, pkgversion_and_extras = self.as_cache_key(ireq)\n return pkgversion_and_extras in self.cache.get(pkgname, {})\n\n def __getitem__(self, ireq):\n pkgname, pkgversion_and_extras = self.as_cache_key(ireq)\n return self.cache[pkgname][pkgversion_and_extras]\n\n def __setitem__(self, ireq, values):\n pkgname, pkgversion_and_extras = self.as_cache_key(ireq)\n self.cache.setdefault(pkgname, {})\n self.cache[pkgname][pkgversion_and_extras] = values\n self.write_cache()\n\n def reverse_dependencies(self, ireqs):\n \"\"\"\n Returns a lookup table of reverse dependencies for all the given ireqs.\n\n Since this is all static, it only works if the dependency cache\n contains the complete data, otherwise you end up with a partial view.\n This is typically no problem if you use this function after the entire\n dependency tree is resolved.\n \"\"\"\n ireqs_as_cache_values = [self.as_cache_key(ireq) for ireq in ireqs]\n return self._reverse_dependencies(ireqs_as_cache_values)\n\n def _reverse_dependencies(self, cache_keys):\n \"\"\"\n Returns a lookup table of reverse dependencies for all the given cache keys.\n\n Example input:\n\n [('pep8', '1.5.7'),\n ('flake8', '2.4.0'),\n ('mccabe', '0.3'),\n ('pyflakes', '0.8.1')]\n\n Example output:\n\n {'pep8': ['flake8'],\n 'flake8': [],\n 'mccabe': ['flake8'],\n 'pyflakes': ['flake8']}\n\n \"\"\"\n # First, collect all the dependencies into a sequence of (parent, child)\n # tuples, like [('flake8', 'pep8'), ('flake8', 'mccabe'), ...]\n return lookup_table(\n (key_from_req(Requirement(dep_name)), name)\n for name, version_and_extras in cache_keys\n for dep_name in self.cache[name][version_and_extras]\n )\n", "path": "piptools/cache.py"}]} | 3,398 | 415 |
gh_patches_debug_23167 | rasdani/github-patches | git_diff | onnx__sklearn-onnx-59 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
MemoryError when trying to convert TfIdf
Hello
Got an exception when trying to export a pipeline with a TfIdf:
Exception "unhandled MemoryError"
cv = CountVectorizer()
tt = TfidfTransformer()
lsvc = LinearSVC(penalty=penalty, dual=False, tol=1e-3)
text_clf = Pipeline([
('vect', cv),
('tfidf', tt),
('clf', lsvc),
])
text_clf.fit(twenty_train.data, twenty_train.target)
print("Converting text_clf to onnx...")
onnx = convert_sklearn(text_clf, target_opset=9, name='DocClassifierCV-Tfidf-LSVC',
initial_types=[('input', StringTensorType())]
)
Exception "unhandled MemoryError"
The stack is:
convert_sklearn()
convert_topology() :
_registration.get_converter(operator.type)(scope, operator, container)
convert_sklearn_tfidf_transformer()
if not isinstance(cst, numpy.ndarray):
cst = numpy.array(cst.todense())
toDense()
return np.asmatrix(self.toarray(order=order, out=out))
_process_toarray_args()
return np.zeros(self.shape, dtype=self.dtype, order=order)
Could make sense: the input sparse matrix is (strangely) 130000 x 130000, far too big to densify.
</issue>
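Editorial note, not part of the original report: the failing call is cst.todense() on
TfidfTransformer._idf_diag, which scikit-learn stores as a sparse diagonal matrix of shape
(n_features, n_features). Densifying it therefore allocates the full square matrix even though
only the diagonal carries information. The sketch below is illustrative only; the vocabulary
size of 130000 comes from the report, everything else (names, values) is assumed.

    # Minimal sketch of the failure mode, assuming a 130000-term vocabulary.
    import numpy as np
    from scipy.sparse import diags

    n_features = 130000                                   # vocabulary size from the report
    idf_diag = diags(np.ones(n_features, dtype=np.float32)).tocsr()

    # Densifying needs n_features ** 2 cells: roughly 67 GB of float32 -> MemoryError.
    dense_bytes = n_features ** 2 * np.dtype(np.float32).itemsize
    # Only the diagonal is actually needed: n_features cells, well under 1 MB.
    idf_values = idf_diag.diagonal()
    print(dense_bytes, idf_values.shape)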
<code>
[start of skl2onnx/operator_converters/TfIdfTransformer.py]
1 # -------------------------------------------------------------------------
2 # Copyright (c) Microsoft Corporation. All rights reserved.
3 # Licensed under the MIT License. See License.txt in the project root for
4 # license information.
5 # --------------------------------------------------------------------------
6
7 import numpy
8 import numbers
9 import warnings
10 from ..common._registration import register_converter
11 from ..common._apply_operation import apply_log, apply_add, apply_mul, apply_identity
12 from ..proto import onnx_proto
13
14
15 def convert_sklearn_tfidf_transformer(scope, operator, container):
16 # TODO: use sparse containers when available
17 op = operator.raw_operator
18 data = operator.input_full_names
19 final = operator.output_full_names
20 C = operator.inputs[0].type.shape[1]
21
22 if op.sublinear_tf:
23 # code scikit-learn
24 # np.log(X.data, X.data) --> does not apply on null coefficient
25 # X.data += 1
26 raise RuntimeError("ONNX does not support sparse tensors, sublinear_tf must be False")
27
28 logged = scope.get_unique_variable_name('logged')
29 apply_log(scope, data, logged, container)
30
31 if not op.use_idf and op.norm is None:
32 loggedplus1 = final
33 else:
34 loggedplus1 = scope.get_unique_variable_name('loggedplus1')
35 ones = scope.get_unique_variable_name('ones')
36 cst = numpy.ones((C,), dtype=numpy.float32)
37 container.add_initializer(ones, onnx_proto.TensorProto.FLOAT, [C], cst.flatten())
38 apply_add(scope, [logged, ones], loggedplus1, container, broadcast=1)
39
40 data = [loggedplus1]
41
42 if op.use_idf:
43 # code scikit-learn
44 # X = X * self._idf_diag
45 cst = op._idf_diag.astype(numpy.float32)
46 if not isinstance(cst, numpy.ndarray):
47 cst = numpy.array(cst.todense())
48 if len(cst.shape) > 1:
49 cst = numpy.diag(cst)
50 cst = cst.ravel().flatten()
51 shape = [len(cst)]
52 idfcst = scope.get_unique_variable_name('idfcst')
53 container.add_initializer(idfcst, onnx_proto.TensorProto.FLOAT, shape, cst)
54 idfed = final if op.norm is None else scope.get_unique_variable_name('idfed')
55 apply_mul(scope, data + [idfcst], idfed, container, broadcast=1)
56 data = [idfed]
57
58 if op.norm is not None:
59 op_type = 'Normalizer'
60 norm_map = {'max': 'MAX', 'l1': 'L1', 'l2': 'L2'}
61 attrs = {'name': scope.get_unique_operator_name(op_type)}
62 if op.norm in norm_map:
63 attrs['norm'] = norm_map[op.norm]
64 else:
65 raise RuntimeError('Invalid norm: %s' % op.norm)
66
67 container.add_node(op_type, data, operator.output_full_names, op_domain='ai.onnx.ml', **attrs)
68 data = None
69
70 if data == operator.input_full_names:
71 # Nothing happened --> identity
72 apply_identity(scope, data, final, container)
73
74
75 register_converter('SklearnTfidfTransformer', convert_sklearn_tfidf_transformer)
76
[end of skl2onnx/operator_converters/TfIdfTransformer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/skl2onnx/operator_converters/TfIdfTransformer.py b/skl2onnx/operator_converters/TfIdfTransformer.py
--- a/skl2onnx/operator_converters/TfIdfTransformer.py
+++ b/skl2onnx/operator_converters/TfIdfTransformer.py
@@ -4,9 +4,10 @@
# license information.
# --------------------------------------------------------------------------
-import numpy
import numbers
import warnings
+import numpy
+from scipy.sparse import diags
from ..common._registration import register_converter
from ..common._apply_operation import apply_log, apply_add, apply_mul, apply_identity
from ..proto import onnx_proto
@@ -44,7 +45,11 @@
# X = X * self._idf_diag
cst = op._idf_diag.astype(numpy.float32)
if not isinstance(cst, numpy.ndarray):
- cst = numpy.array(cst.todense())
+ if len(cst.shape) > 1:
+ n = cst.shape[0]
+ cst = numpy.array([cst[i, i] for i in range(n)])
+ else:
+ cst = numpy.array(cst.todense())
if len(cst.shape) > 1:
cst = numpy.diag(cst)
cst = cst.ravel().flatten()
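Editorial note on the fix: instead of densifying the sparse diagonal matrix, the patch reads the
diagonal entries one by one (cst[i, i]) and builds a plain 1-D array, so memory stays proportional
to the number of features; the scipy.sparse.diags import added in the same patch is not exercised
by this hunk. A small equivalence check, written here as an assumed illustration rather than
anything taken from the repository, could look like:

    import numpy as np
    from scipy.sparse import diags

    cst = diags(np.array([1.5, 2.0, 0.5], dtype=np.float32)).tocsr()
    n = cst.shape[0]
    by_indexing = np.array([cst[i, i] for i in range(n)])    # path taken by the patch
    by_densifying = np.diag(np.asarray(cst.todense()))        # original, memory-hungry path
    assert np.allclose(by_indexing, by_densifying)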
| {"golden_diff": "diff --git a/skl2onnx/operator_converters/TfIdfTransformer.py b/skl2onnx/operator_converters/TfIdfTransformer.py\n--- a/skl2onnx/operator_converters/TfIdfTransformer.py\n+++ b/skl2onnx/operator_converters/TfIdfTransformer.py\n@@ -4,9 +4,10 @@\n # license information.\n # --------------------------------------------------------------------------\n \n-import numpy\n import numbers\n import warnings\n+import numpy\n+from scipy.sparse import diags\n from ..common._registration import register_converter\n from ..common._apply_operation import apply_log, apply_add, apply_mul, apply_identity\n from ..proto import onnx_proto\n@@ -44,7 +45,11 @@\n # X = X * self._idf_diag\n cst = op._idf_diag.astype(numpy.float32)\n if not isinstance(cst, numpy.ndarray):\n- cst = numpy.array(cst.todense())\n+ if len(cst.shape) > 1:\n+ n = cst.shape[0]\n+ cst = numpy.array([cst[i, i] for i in range(n)])\n+ else:\n+ cst = numpy.array(cst.todense())\n if len(cst.shape) > 1:\n cst = numpy.diag(cst)\n cst = cst.ravel().flatten()\n", "issue": "MemoryError when trying to convert TfIdf\nHello\r\nGot an exception when trying to export a pipeline with a TfIdf : \r\nException \"unhandled MemoryError\"\r\n cv = CountVectorizer()\r\n tt = TfidfTransformer()\r\n lsvc = LinearSVC(penalty=penalty, dual=False, tol=1e-3)\r\n text_clf = Pipeline([ \r\n ('vect', cv),\r\n ('tfidf', tt),\r\n ('clf', lsvc),\r\n ])\r\n text_clf.fit(twenty_train.data, twenty_train.target) \r\n print(\"Converting text_clf to onnx...\")\r\n onnx = convert_sklearn(text_clf, target_opset=9, name='DocClassifierCV-Tfidf-LSVC', \r\n initial_types=[('input', StringTensorType())]\r\n )\r\n\r\nException \"unhandled MemoryError\"\r\n\r\nThe stack is:\r\n\r\nconvert_sklearn()\r\n\r\nconvert_topology() : \r\n _registration.get_converter(operator.type)(scope, operator, container)\r\n\r\nconvert_sklearn_tfidf_transformer()\r\n if not isinstance(cst, numpy.ndarray):\r\n cst = numpy.array(cst.todense())\r\n\r\ntoDense()\r\n return np.asmatrix(self.toarray(order=order, out=out))\r\n\r\n_process_toarray_args()\r\n return np.zeros(self.shape, dtype=self.dtype, order=order)\r\n\r\nCould make sens : the input sparse matrix is (strangely) 130000 per 130000, pretty big to be densified.\r\n\n", "before_files": [{"content": "# -------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for\n# license information.\n# --------------------------------------------------------------------------\n\nimport numpy\nimport numbers\nimport warnings\nfrom ..common._registration import register_converter\nfrom ..common._apply_operation import apply_log, apply_add, apply_mul, apply_identity\nfrom ..proto import onnx_proto\n\n\ndef convert_sklearn_tfidf_transformer(scope, operator, container):\n # TODO: use sparse containers when available\n op = operator.raw_operator\n data = operator.input_full_names\n final = operator.output_full_names\n C = operator.inputs[0].type.shape[1]\n \n if op.sublinear_tf:\n # code scikit-learn\n # np.log(X.data, X.data) --> does not apply on null coefficient\n # X.data += 1\n raise RuntimeError(\"ONNX does not support sparse tensors, sublinear_tf must be False\")\n \n logged = scope.get_unique_variable_name('logged')\n apply_log(scope, data, logged, container)\n \n if not op.use_idf and op.norm is None:\n loggedplus1 = final\n else:\n loggedplus1 = scope.get_unique_variable_name('loggedplus1')\n ones = scope.get_unique_variable_name('ones')\n cst = numpy.ones((C,), dtype=numpy.float32)\n container.add_initializer(ones, onnx_proto.TensorProto.FLOAT, [C], cst.flatten()) \n apply_add(scope, [logged, ones], loggedplus1, container, broadcast=1)\n \n data = [loggedplus1]\n \n if op.use_idf:\n # code scikit-learn\n # X = X * self._idf_diag\n cst = op._idf_diag.astype(numpy.float32)\n if not isinstance(cst, numpy.ndarray):\n cst = numpy.array(cst.todense())\n if len(cst.shape) > 1:\n cst = numpy.diag(cst)\n cst = cst.ravel().flatten()\n shape = [len(cst)]\n idfcst = scope.get_unique_variable_name('idfcst')\n container.add_initializer(idfcst, onnx_proto.TensorProto.FLOAT, shape, cst)\n idfed = final if op.norm is None else scope.get_unique_variable_name('idfed')\n apply_mul(scope, data + [idfcst], idfed, container, broadcast=1)\n data = [idfed]\n\n if op.norm is not None:\n op_type = 'Normalizer'\n norm_map = {'max': 'MAX', 'l1': 'L1', 'l2': 'L2'}\n attrs = {'name': scope.get_unique_operator_name(op_type)}\n if op.norm in norm_map:\n attrs['norm'] = norm_map[op.norm]\n else:\n raise RuntimeError('Invalid norm: %s' % op.norm)\n\n container.add_node(op_type, data, operator.output_full_names, op_domain='ai.onnx.ml', **attrs)\n data = None\n \n if data == operator.input_full_names:\n # Nothing happened --> identity\n apply_identity(scope, data, final, container)\n\n\nregister_converter('SklearnTfidfTransformer', convert_sklearn_tfidf_transformer)\n", "path": "skl2onnx/operator_converters/TfIdfTransformer.py"}]} | 1,702 | 285 |