problem_id (string, 18-22 chars) | source (string, 1 distinct value) | task_type (string, 1 distinct value) | in_source_id (string, 13-58 chars) | prompt (string, 1.71k-18.9k chars) | golden_diff (string, 145-5.13k chars) | verification_info (string, 465-23.6k chars) | num_tokens_prompt (int64, 556-4.1k) | num_tokens_diff (int64, 47-1.02k) |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_22216 | rasdani/github-patches | git_diff | spacetelescope__jwql-483 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Simplify conda environments
Since `conda` will automatically determine which libraries are needed for dependencies (e.g. `numpy_base` is installed when installing `numpy`), we could probably trim down our conda environments to only those high-level packages that are used within our repo, and `conda` will figure out the rest. It might also be a good time to make sure the `conda` environment is consistent with the dependencies listed in `setup.py` and `requirements.txt`.
</issue>
<code>
[start of setup.py]
1 import numpy as np
2 from setuptools import setup
3 from setuptools import find_packages
4
5 VERSION = '0.21.0'
6
7 AUTHORS = 'Matthew Bourque, Lauren Chambers, Misty Cracraft, Joe Filippazzo, Bryan Hilbert, '
8 AUTHORS += 'Graham Kanarek, Catherine Martlin, Johannes Sahlmann'
9
10 DESCRIPTION = 'The James Webb Space Telescope Quicklook Project'
11
12 REQUIRES = [
13 'astropy>=3.2.1',
14 'astroquery>=0.3.9',
15 'authlib',
16 'bokeh>=1.0',
17 'django>=2.0',
18 'jinja2',
19 'jsonschema==2.6.0',
20 'jwedb',
21 'jwst',
22 'matplotlib',
23 'numpy',
24 'numpydoc',
25 'pandas',
26 'psycopg2',
27 'pysiaf',
28 'pytest',
29 'sphinx',
30 'sqlalchemy',
31 'stsci_rtd_theme'
32 ]
33
34 setup(
35 name='jwql',
36 version=VERSION,
37 description=DESCRIPTION,
38 url='https://github.com/spacetelescope/jwql.git',
39 author=AUTHORS,
40 author_email='[email protected]',
41 license='BSD',
42 keywords=['astronomy', 'python'],
43 classifiers=['Programming Language :: Python'],
44 packages=find_packages(),
45 install_requires=REQUIRES,
46 include_package_data=True,
47 include_dirs=[np.get_include()],
48 )
49
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -4,21 +4,26 @@
VERSION = '0.21.0'
-AUTHORS = 'Matthew Bourque, Lauren Chambers, Misty Cracraft, Joe Filippazzo, Bryan Hilbert, '
-AUTHORS += 'Graham Kanarek, Catherine Martlin, Johannes Sahlmann'
+AUTHORS = 'Matthew Bourque, Misty Cracraft, Joe Filippazzo, Bryan Hilbert, '
+AUTHORS += 'Graham Kanarek, Catherine Martlin, Johannes Sahlmann, Ben Sunnquist'
DESCRIPTION = 'The James Webb Space Telescope Quicklook Project'
REQUIRES = [
+ 'asdf>=2.3.3',
'astropy>=3.2.1',
'astroquery>=0.3.9',
'authlib',
'bokeh>=1.0',
+ 'codecov',
'django>=2.0',
+ 'flake8',
+ 'inflection',
+ 'ipython',
'jinja2',
'jsonschema==2.6.0',
- 'jwedb',
- 'jwst',
+ 'jwedb>=0.0.3',
+ 'jwst==0.13.0',
'matplotlib',
'numpy',
'numpydoc',
@@ -26,9 +31,12 @@
'psycopg2',
'pysiaf',
'pytest',
+ 'pytest-cov',
+ 'scipy',
'sphinx',
'sqlalchemy',
- 'stsci_rtd_theme'
+ 'stsci_rtd_theme',
+ 'twine'
]
setup(
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -4,21 +4,26 @@\n \n VERSION = '0.21.0'\n \n-AUTHORS = 'Matthew Bourque, Lauren Chambers, Misty Cracraft, Joe Filippazzo, Bryan Hilbert, '\n-AUTHORS += 'Graham Kanarek, Catherine Martlin, Johannes Sahlmann'\n+AUTHORS = 'Matthew Bourque, Misty Cracraft, Joe Filippazzo, Bryan Hilbert, '\n+AUTHORS += 'Graham Kanarek, Catherine Martlin, Johannes Sahlmann, Ben Sunnquist'\n \n DESCRIPTION = 'The James Webb Space Telescope Quicklook Project'\n \n REQUIRES = [\n+ 'asdf>=2.3.3',\n 'astropy>=3.2.1',\n 'astroquery>=0.3.9',\n 'authlib',\n 'bokeh>=1.0',\n+ 'codecov',\n 'django>=2.0',\n+ 'flake8',\n+ 'inflection',\n+ 'ipython',\n 'jinja2',\n 'jsonschema==2.6.0',\n- 'jwedb',\n- 'jwst',\n+ 'jwedb>=0.0.3',\n+ 'jwst==0.13.0',\n 'matplotlib',\n 'numpy',\n 'numpydoc',\n@@ -26,9 +31,12 @@\n 'psycopg2',\n 'pysiaf',\n 'pytest',\n+ 'pytest-cov',\n+ 'scipy',\n 'sphinx',\n 'sqlalchemy',\n- 'stsci_rtd_theme'\n+ 'stsci_rtd_theme',\n+ 'twine'\n ]\n \n setup(\n", "issue": "Simplify conda environments \nSince `conda` will automatically determine which libraries are needed for dependencies (e.g. `numpy_base` is installed when installing `numpy`), we could probably use to trim down our conda environments to only those high-level packages that are used within our repo, and `conda` will figure out the rest. It might also be a good time to make sure the `conda` environment is consistent with the dependencies listed in `setup.py` and `requirements.txt`\n", "before_files": [{"content": "import numpy as np\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nVERSION = '0.21.0'\n\nAUTHORS = 'Matthew Bourque, Lauren Chambers, Misty Cracraft, Joe Filippazzo, Bryan Hilbert, '\nAUTHORS += 'Graham Kanarek, Catherine Martlin, Johannes Sahlmann'\n\nDESCRIPTION = 'The James Webb Space Telescope Quicklook Project'\n\nREQUIRES = [\n 'astropy>=3.2.1',\n 'astroquery>=0.3.9',\n 'authlib',\n 'bokeh>=1.0',\n 'django>=2.0',\n 'jinja2',\n 'jsonschema==2.6.0',\n 'jwedb',\n 'jwst',\n 'matplotlib',\n 'numpy',\n 'numpydoc',\n 'pandas',\n 'psycopg2',\n 'pysiaf',\n 'pytest',\n 'sphinx',\n 'sqlalchemy',\n 'stsci_rtd_theme'\n]\n\nsetup(\n name='jwql',\n version=VERSION,\n description=DESCRIPTION,\n url='https://github.com/spacetelescope/jwql.git',\n author=AUTHORS,\n author_email='[email protected]',\n license='BSD',\n keywords=['astronomy', 'python'],\n classifiers=['Programming Language :: Python'],\n packages=find_packages(),\n install_requires=REQUIRES,\n include_package_data=True,\n include_dirs=[np.get_include()],\n)\n", "path": "setup.py"}]} | 1,039 | 391 |
gh_patches_debug_37169 | rasdani/github-patches | git_diff | HybirdCorp__creme_crm-84 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Duplicated buttons
The configuration allows adding a button twice to a content type:
- On the button configuration page `/creme_config/button_menu/portal`
- Given a button that is not specific to a content type (Usable in the default configuration)
- Remove this button from the default configuration (if exists)
- Add this button to a content type (Contact for example)
- Add this button again to the default configuration
- The button is present twice on a Contact page.
version 2.2 alpha (master)
</issue>
<code>
[start of creme/creme_core/templatetags/creme_menu.py]
1 # -*- coding: utf-8 -*-
2
3 ################################################################################
4 # Creme is a free/open-source Customer Relationship Management software
5 # Copyright (C) 2009-2020 Hybird
6 #
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Affero General Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Affero General Public License for more details.
16 #
17 # You should have received a copy of the GNU Affero General Public License
18 # along with this program. If not, see <http://www.gnu.org/licenses/>.
19 ################################################################################
20
21 from django.db.models import Q
22 from django.template import Library
23
24 from ..gui.button_menu import button_registry
25 from ..gui.menu import creme_menu
26 from ..models import ButtonMenuItem
27
28 register = Library()
29
30
31 @register.simple_tag(takes_context=True)
32 def menu_display(context):
33 return creme_menu.render(context)
34
35
36 # TODO: rename template file (menu-buttons.html ? detailview-buttons.html ? menu/buttons.html ?)
37 @register.inclusion_tag('creme_core/templatetags/menu_buttons.html', takes_context=True)
38 def menu_buttons_display(context):
39 entity = context['object']
40 bmi = ButtonMenuItem.objects.filter(Q(content_type=entity.entity_type) |
41 Q(content_type__isnull=True)
42 ) \
43 .exclude(button_id='') \
44 .order_by('order') \
45 .values_list('button_id', flat=True)
46
47 button_ctxt = context.flatten()
48 # TODO: pass the registry in the context ?
49 context['buttons'] = [
50 button.render(button_ctxt)
51 for button in button_registry.get_buttons(bmi, entity)
52 ]
53
54 return context
55
[end of creme/creme_core/templatetags/creme_menu.py]
[start of creme/creme_core/gui/button_menu.py]
1 # -*- coding: utf-8 -*-
2
3 ################################################################################
4 # Creme is a free/open-source Customer Relationship Management software
5 # Copyright (C) 2009-2020 Hybird
6 #
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Affero General Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Affero General Public License for more details.
16 #
17 # You should have received a copy of the GNU Affero General Public License
18 # along with this program. If not, see <http://www.gnu.org/licenses/>.
19 ################################################################################
20
21 import logging
22 from typing import Dict, Iterable, Iterator, Optional, Sequence, Tuple, Type
23
24 from django.template.loader import get_template
25
26 from ..models import CremeEntity
27
28 logger = logging.getLogger(__name__)
29
30
31 class Button:
32 # ID of the button, stored in DB (ie: the button configuration), to retrieve
33 # the right button class (so it must be unique)
34 # Override it in child class with a value generated by 'generate_id()'.
35 # id_ = None
36 id_: str = ''
37
38 # Label used in the configuration GUI to display the button (see models.ButtonMenuItem)
39 # Tips: use gettext_lazy()
40 verbose_name: str = 'BUTTON'
41
42 # Name/path of the template used to render the button.
43 template_name: str = 'creme_core/buttons/place-holder.html'
44
45 # Permission string ; None means not permission needed.
46 # eg :'myapp.add_mymodel'
47 # BEWARE: you have to use the template context variable 'has_perm' yourself !!
48 permission: Optional[str] = None # TODO: <permission: str = ''> ??
49
50 @staticmethod
51 def generate_id(app_name: str, name: str) -> str:
52 return f'button_{app_name}-{name}'
53
54 def get_ctypes(self) -> Sequence[Type[CremeEntity]]:
55 """
56 @return A sequence of CremeEntity class that can have this type of button.
57 Void sequence means that all types are ok.
58 eg: (Contact, Organisation)
59 """
60 return ()
61
62 def has_perm(self, context) -> bool:
63 permission = self.permission
64 return context['request'].user.has_perm(permission) if permission else True
65
66 def ok_4_display(self, entity: CremeEntity) -> bool:
67 """Can this button be displayed on this entity's detail-view ?
68 @param entity: CremeEntity which detail-view is displayed.
69 @return True if the button can be displayed for 'entity'.
70 """
71 return True
72
73 def render(self, context) -> str:
74 context['has_perm'] = self.has_perm(context)
75
76 return get_template(self.template_name).render(context)
77
78
79 class ButtonsRegistry:
80 class RegistrationError(Exception):
81 pass
82
83 def __init__(self):
84 self._button_classes: Dict[str, Type[Button]] = {}
85
86 def register(self, *button_classes: Type[Button]) -> None:
87 """
88 @type button_classes: creme_core.gui.menu_buttons.Button child classes.
89 """
90 setdefault = self._button_classes.setdefault
91
92 for button_cls in button_classes:
93 button_id = button_cls.id_
94
95 if not button_id:
96 raise self.RegistrationError(
97 f'Button class with empty id_: {button_cls}'
98 )
99
100 if setdefault(button_id, button_cls) is not button_cls:
101 raise self.RegistrationError(
102 f"Duplicated button's ID (or button registered twice) : {button_id}"
103 )
104
105 def get_button(self, button_id: str) -> Optional[Button]:
106 cls = self._button_classes.get(button_id)
107
108 return cls() if cls else None
109
110 def get_buttons(self, id_list: Iterable[str], entity: CremeEntity) -> Iterator[Button]:
111 """Generate the Buttons to be displayed on the detail-view of an entity.
112 Deprecated buttons & buttons that should not be displayed for this entity
113 are ignored.
114 @param id_list: Sequence of button IDs.
115 @param entity: CremeEntity instance.
116 @yield creme_core.gui.button_menu.Button instances.
117 """
118 button_classes = self._button_classes
119
120 for button_id in id_list:
121 button_cls = button_classes.get(button_id)
122
123 if button_cls is None:
124 logger.warning('Button seems deprecated: %s', button_id)
125 else:
126 button = button_cls()
127
128 if button.ok_4_display(entity):
129 yield button
130
131 def __iter__(self) -> Iterator[Tuple[str, Button]]:
132 for b_id, b_cls in self._button_classes.items():
133 yield b_id, b_cls()
134
135
136 button_registry = ButtonsRegistry()
137
[end of creme/creme_core/gui/button_menu.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/creme/creme_core/gui/button_menu.py b/creme/creme_core/gui/button_menu.py
--- a/creme/creme_core/gui/button_menu.py
+++ b/creme/creme_core/gui/button_menu.py
@@ -61,7 +61,8 @@
def has_perm(self, context) -> bool:
permission = self.permission
- return context['request'].user.has_perm(permission) if permission else True
+ # return context['request'].user.has_perm(permission) if permission else True
+ return context['user'].has_perm(permission) if permission else True
def ok_4_display(self, entity: CremeEntity) -> bool:
"""Can this button be displayed on this entity's detail-view ?
diff --git a/creme/creme_core/templatetags/creme_menu.py b/creme/creme_core/templatetags/creme_menu.py
--- a/creme/creme_core/templatetags/creme_menu.py
+++ b/creme/creme_core/templatetags/creme_menu.py
@@ -18,10 +18,12 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
################################################################################
+from collections import OrderedDict
+
from django.db.models import Q
from django.template import Library
-from ..gui.button_menu import button_registry
+from ..gui import button_menu
from ..gui.menu import creme_menu
from ..models import ButtonMenuItem
@@ -34,21 +36,29 @@
# TODO: rename template file (menu-buttons.html ? detailview-buttons.html ? menu/buttons.html ?)
[email protected]_tag('creme_core/templatetags/menu_buttons.html', takes_context=True)
[email protected]_tag(
+ 'creme_core/templatetags/menu_buttons.html', takes_context=True,
+)
def menu_buttons_display(context):
entity = context['object']
- bmi = ButtonMenuItem.objects.filter(Q(content_type=entity.entity_type) |
- Q(content_type__isnull=True)
- ) \
- .exclude(button_id='') \
- .order_by('order') \
- .values_list('button_id', flat=True)
+ bmi = ButtonMenuItem.objects.filter(
+ Q(content_type=entity.entity_type)
+ | Q(content_type__isnull=True)
+ ).exclude(
+ button_id='',
+ ).order_by(
+ 'order',
+ ).values_list(
+ 'button_id', flat=True,
+ )
button_ctxt = context.flatten()
# TODO: pass the registry in the context ?
- context['buttons'] = [
- button.render(button_ctxt)
- for button in button_registry.get_buttons(bmi, entity)
- ]
+ buttons = OrderedDict()
+
+ for button in button_menu.button_registry.get_buttons(bmi, entity):
+ buttons[button.id_] = button.render(button_ctxt)
+
+ context['buttons'] = [*buttons.values()]
return context
| {"golden_diff": "diff --git a/creme/creme_core/gui/button_menu.py b/creme/creme_core/gui/button_menu.py\n--- a/creme/creme_core/gui/button_menu.py\n+++ b/creme/creme_core/gui/button_menu.py\n@@ -61,7 +61,8 @@\n \n def has_perm(self, context) -> bool:\n permission = self.permission\n- return context['request'].user.has_perm(permission) if permission else True\n+ # return context['request'].user.has_perm(permission) if permission else True\n+ return context['user'].has_perm(permission) if permission else True\n \n def ok_4_display(self, entity: CremeEntity) -> bool:\n \"\"\"Can this button be displayed on this entity's detail-view ?\ndiff --git a/creme/creme_core/templatetags/creme_menu.py b/creme/creme_core/templatetags/creme_menu.py\n--- a/creme/creme_core/templatetags/creme_menu.py\n+++ b/creme/creme_core/templatetags/creme_menu.py\n@@ -18,10 +18,12 @@\n # along with this program. If not, see <http://www.gnu.org/licenses/>.\n ################################################################################\n \n+from collections import OrderedDict\n+\n from django.db.models import Q\n from django.template import Library\n \n-from ..gui.button_menu import button_registry\n+from ..gui import button_menu\n from ..gui.menu import creme_menu\n from ..models import ButtonMenuItem\n \n@@ -34,21 +36,29 @@\n \n \n # TODO: rename template file (menu-buttons.html ? detailview-buttons.html ? menu/buttons.html ?)\[email protected]_tag('creme_core/templatetags/menu_buttons.html', takes_context=True)\[email protected]_tag(\n+ 'creme_core/templatetags/menu_buttons.html', takes_context=True,\n+)\n def menu_buttons_display(context):\n entity = context['object']\n- bmi = ButtonMenuItem.objects.filter(Q(content_type=entity.entity_type) |\n- Q(content_type__isnull=True)\n- ) \\\n- .exclude(button_id='') \\\n- .order_by('order') \\\n- .values_list('button_id', flat=True)\n+ bmi = ButtonMenuItem.objects.filter(\n+ Q(content_type=entity.entity_type)\n+ | Q(content_type__isnull=True)\n+ ).exclude(\n+ button_id='',\n+ ).order_by(\n+ 'order',\n+ ).values_list(\n+ 'button_id', flat=True,\n+ )\n \n button_ctxt = context.flatten()\n # TODO: pass the registry in the context ?\n- context['buttons'] = [\n- button.render(button_ctxt)\n- for button in button_registry.get_buttons(bmi, entity)\n- ]\n+ buttons = OrderedDict()\n+\n+ for button in button_menu.button_registry.get_buttons(bmi, entity):\n+ buttons[button.id_] = button.render(button_ctxt)\n+\n+ context['buttons'] = [*buttons.values()]\n \n return context\n", "issue": "Duplicated buttons\nThe configuration allows adding a button twice to a content type:\r\n\r\n- On the button configuration page `/creme_config/button_menu/portal`\r\n- Given a button that is not specific to a content type (Usable in the default configuration)\r\n- Remove this button from the default configuration (if exists)\r\n- Add this button to a content type (Contact for example)\r\n- Add this button again to the default configuration\r\n- The button is present twice on a Contact page.\r\n\r\nversion 2.2 alpha (master)\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n################################################################################\n# Creme is a free/open-source Customer Relationship Management software\n# Copyright (C) 2009-2020 Hybird\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your 
option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n################################################################################\n\nfrom django.db.models import Q\nfrom django.template import Library\n\nfrom ..gui.button_menu import button_registry\nfrom ..gui.menu import creme_menu\nfrom ..models import ButtonMenuItem\n\nregister = Library()\n\n\[email protected]_tag(takes_context=True)\ndef menu_display(context):\n return creme_menu.render(context)\n\n\n# TODO: rename template file (menu-buttons.html ? detailview-buttons.html ? menu/buttons.html ?)\[email protected]_tag('creme_core/templatetags/menu_buttons.html', takes_context=True)\ndef menu_buttons_display(context):\n entity = context['object']\n bmi = ButtonMenuItem.objects.filter(Q(content_type=entity.entity_type) |\n Q(content_type__isnull=True)\n ) \\\n .exclude(button_id='') \\\n .order_by('order') \\\n .values_list('button_id', flat=True)\n\n button_ctxt = context.flatten()\n # TODO: pass the registry in the context ?\n context['buttons'] = [\n button.render(button_ctxt)\n for button in button_registry.get_buttons(bmi, entity)\n ]\n\n return context\n", "path": "creme/creme_core/templatetags/creme_menu.py"}, {"content": "# -*- coding: utf-8 -*-\n\n################################################################################\n# Creme is a free/open-source Customer Relationship Management software\n# Copyright (C) 2009-2020 Hybird\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n################################################################################\n\nimport logging\nfrom typing import Dict, Iterable, Iterator, Optional, Sequence, Tuple, Type\n\nfrom django.template.loader import get_template\n\nfrom ..models import CremeEntity\n\nlogger = logging.getLogger(__name__)\n\n\nclass Button:\n # ID of the button, stored in DB (ie: the button configuration), to retrieve\n # the right button class (so it must be unique)\n # Override it in child class with a value generated by 'generate_id()'.\n # id_ = None\n id_: str = ''\n\n # Label used in the configuration GUI to display the button (see models.ButtonMenuItem)\n # Tips: use gettext_lazy()\n verbose_name: str = 'BUTTON'\n\n # Name/path of the template used to render the button.\n template_name: str = 'creme_core/buttons/place-holder.html'\n\n # Permission string ; None means not permission needed.\n # eg :'myapp.add_mymodel'\n # BEWARE: you have to use the template context variable 'has_perm' yourself !!\n permission: Optional[str] = None # TODO: <permission: str = ''> ??\n\n @staticmethod\n def generate_id(app_name: str, name: str) -> str:\n return f'button_{app_name}-{name}'\n\n def get_ctypes(self) -> Sequence[Type[CremeEntity]]:\n \"\"\"\n @return A sequence of CremeEntity class that can have this type of button.\n Void sequence means that all types are ok.\n eg: (Contact, Organisation)\n \"\"\"\n return ()\n\n def has_perm(self, context) -> bool:\n permission = self.permission\n return context['request'].user.has_perm(permission) if permission else True\n\n def ok_4_display(self, entity: CremeEntity) -> bool:\n \"\"\"Can this button be displayed on this entity's detail-view ?\n @param entity: CremeEntity which detail-view is displayed.\n @return True if the button can be displayed for 'entity'.\n \"\"\"\n return True\n\n def render(self, context) -> str:\n context['has_perm'] = self.has_perm(context)\n\n return get_template(self.template_name).render(context)\n\n\nclass ButtonsRegistry:\n class RegistrationError(Exception):\n pass\n\n def __init__(self):\n self._button_classes: Dict[str, Type[Button]] = {}\n\n def register(self, *button_classes: Type[Button]) -> None:\n \"\"\"\n @type button_classes: creme_core.gui.menu_buttons.Button child classes.\n \"\"\"\n setdefault = self._button_classes.setdefault\n\n for button_cls in button_classes:\n button_id = button_cls.id_\n\n if not button_id:\n raise self.RegistrationError(\n f'Button class with empty id_: {button_cls}'\n )\n\n if setdefault(button_id, button_cls) is not button_cls:\n raise self.RegistrationError(\n f\"Duplicated button's ID (or button registered twice) : {button_id}\"\n )\n\n def get_button(self, button_id: str) -> Optional[Button]:\n cls = self._button_classes.get(button_id)\n\n return cls() if cls else None\n\n def get_buttons(self, id_list: Iterable[str], entity: CremeEntity) -> Iterator[Button]:\n \"\"\"Generate the Buttons to be displayed on the detail-view of an entity.\n Deprecated buttons & buttons that should not be displayed for this entity\n are ignored.\n @param id_list: Sequence of button IDs.\n @param entity: CremeEntity instance.\n @yield creme_core.gui.button_menu.Button instances.\n \"\"\"\n button_classes = self._button_classes\n\n for button_id in id_list:\n button_cls = button_classes.get(button_id)\n\n if button_cls is None:\n logger.warning('Button seems deprecated: %s', button_id)\n else:\n button = button_cls()\n\n if button.ok_4_display(entity):\n yield button\n\n def 
__iter__(self) -> Iterator[Tuple[str, Button]]:\n for b_id, b_cls in self._button_classes.items():\n yield b_id, b_cls()\n\n\nbutton_registry = ButtonsRegistry()\n", "path": "creme/creme_core/gui/button_menu.py"}]} | 2,624 | 671 |
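Each prompt asks for a patch that can be applied with `git apply`, and `verification_info` carries both the reference diff and the pre-patch file contents, so a row can be checked locally. A hedged sketch follows (the temporary-directory layout and the use of `git apply --check` are my assumptions; the dataset itself only defines the JSON fields):

```python
# Sketch: materialise "before_files" in a temp directory and verify that the
# reference "golden_diff" applies cleanly. Assumes `git` is on PATH.
import json
import pathlib
import subprocess
import tempfile


def golden_diff_applies(verification_info: str) -> bool:
    info = json.loads(verification_info)
    with tempfile.TemporaryDirectory() as tmp:
        root = pathlib.Path(tmp)
        for f in info["before_files"]:
            target = root / f["path"]
            target.parent.mkdir(parents=True, exist_ok=True)
            target.write_text(f["content"], encoding="utf-8")
        proc = subprocess.run(
            ["git", "apply", "--check", "-"],
            input=info["golden_diff"],
            text=True,
            cwd=root,
            capture_output=True,
        )
        return proc.returncode == 0
```

Dropping `--check` would apply the patch in place instead of only validating it.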
gh_patches_debug_29419 | rasdani/github-patches | git_diff | sublimelsp__LSP-2232 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Settings for editing inlay hints
**Is your feature request related to a problem? Please describe.**
I wish to style the inlay hints that LSP provides; however, LSP only allows for very limited styling (basically background color, font type, and font color, but nothing else). There are these ugly round borders on the little hint that cannot be removed without editing inlay_hint.py directly to change the enforced CSS.
**Describe the solution you'd like**
The ability to either change all CSS values of the inlay hint through variables, or the ability to outright overwrite the CSS that's currently there from a settings file.
**Describe alternatives you've considered**
Editing LSP with PackageResourceViewer by hand each time there is an update.
**Additional context**
Here is my cute little CSS improvement for my own enjoyment, no more distracting button-like border :<)

</issue>
<code>
[start of plugin/core/css.py]
1 import sublime
2 from .typing import Optional
3
4
5 class CSS:
6 def __init__(self) -> None:
7 self.popups = sublime.load_resource("Packages/LSP/popups.css")
8 self.popups_classname = "lsp_popup"
9 self.notification = sublime.load_resource("Packages/LSP/notification.css")
10 self.notification_classname = "notification"
11 self.sheets = sublime.load_resource("Packages/LSP/sheets.css")
12 self.sheets_classname = "lsp_sheet"
13
14
15 _css = None # type: Optional[CSS]
16
17
18 def load() -> None:
19 global _css
20 _css = CSS()
21
22
23 def css() -> CSS:
24 global _css
25 assert _css is not None
26 return _css
27
[end of plugin/core/css.py]
[start of plugin/inlay_hint.py]
1 from .core.protocol import InlayHint
2 from .core.protocol import InlayHintLabelPart
3 from .core.protocol import MarkupContent
4 from .core.protocol import Point
5 from .core.protocol import Request
6 from .core.registry import LspTextCommand
7 from .core.registry import LspWindowCommand
8 from .core.sessions import Session
9 from .core.settings import userprefs
10 from .core.typing import cast, Optional, Union
11 from .core.views import point_to_offset
12 from .formatting import apply_text_edits_to_view
13 import html
14 import sublime
15 import uuid
16
17
18 class LspToggleInlayHintsCommand(LspWindowCommand):
19 capability = 'inlayHintProvider'
20
21 def run(self, enable: Optional[bool] = None) -> None:
22 if not isinstance(enable, bool):
23 enable = not self.are_enabled(self.window)
24 self.window.settings().set('lsp_show_inlay_hints', enable)
25 status = 'on' if enable else 'off'
26 sublime.status_message('Inlay Hints are {}'.format(status))
27 for session in self.sessions():
28 for sv in session.session_views_async():
29 sv.session_buffer.do_inlay_hints_async(sv.view)
30
31 def is_checked(self) -> bool:
32 return self.are_enabled(self.window)
33
34 @classmethod
35 def are_enabled(cls, window: Optional[sublime.Window]) -> bool:
36 if not window:
37 return userprefs().show_inlay_hints
38 return bool(window.settings().get('lsp_show_inlay_hints', userprefs().show_inlay_hints))
39
40
41 class LspInlayHintClickCommand(LspTextCommand):
42 capability = 'inlayHintProvider'
43
44 def run(self, _edit: sublime.Edit, session_name: str, inlay_hint: InlayHint, phantom_uuid: str,
45 event: Optional[dict] = None, label_part: Optional[InlayHintLabelPart] = None) -> None:
46 # Insert textEdits for the given inlay hint.
47 # If a InlayHintLabelPart was clicked, label_part will be passed as an argument to the LspInlayHintClickCommand
48 # and InlayHintLabelPart.command will be executed.
49 session = self.session_by_name(session_name, 'inlayHintProvider')
50 if session and session.has_capability('inlayHintProvider.resolveProvider'):
51 request = Request.resolveInlayHint(inlay_hint, self.view)
52 session.send_request_async(
53 request,
54 lambda response: self.handle(session_name, response, phantom_uuid, label_part))
55 return
56 self.handle(session_name, inlay_hint, phantom_uuid, label_part)
57
58 def handle(self, session_name: str, inlay_hint: InlayHint, phantom_uuid: str,
59 label_part: Optional[InlayHintLabelPart] = None) -> None:
60 self.handle_inlay_hint_text_edits(session_name, inlay_hint, phantom_uuid)
61 self.handle_label_part_command(session_name, label_part)
62
63 def handle_inlay_hint_text_edits(self, session_name: str, inlay_hint: InlayHint, phantom_uuid: str) -> None:
64 session = self.session_by_name(session_name, 'inlayHintProvider')
65 if not session:
66 return
67 text_edits = inlay_hint.get('textEdits')
68 if not text_edits:
69 return
70 for sb in session.session_buffers_async():
71 sb.remove_inlay_hint_phantom(phantom_uuid)
72 apply_text_edits_to_view(text_edits, self.view)
73
74 def handle_label_part_command(self, session_name: str, label_part: Optional[InlayHintLabelPart] = None) -> None:
75 if not label_part:
76 return
77 command = label_part.get('command')
78 if not command:
79 return
80 args = {
81 "session_name": session_name,
82 "command_name": command["command"],
83 "command_args": command.get("arguments")
84 }
85 self.view.run_command("lsp_execute", args)
86
87
88 def inlay_hint_to_phantom(view: sublime.View, inlay_hint: InlayHint, session: Session) -> sublime.Phantom:
89 position = inlay_hint["position"]
90 region = sublime.Region(point_to_offset(Point.from_lsp(position), view))
91 phantom_uuid = str(uuid.uuid4())
92 content = get_inlay_hint_html(view, inlay_hint, session, phantom_uuid)
93 p = sublime.Phantom(region, content, sublime.LAYOUT_INLINE)
94 setattr(p, 'lsp_uuid', phantom_uuid)
95 return p
96
97
98 def get_inlay_hint_html(view: sublime.View, inlay_hint: InlayHint, session: Session, phantom_uuid: str) -> str:
99 label = format_inlay_hint_label(inlay_hint, session, phantom_uuid)
100 font = view.settings().get('font_face') or "monospace"
101 html = """
102 <body id="lsp-inlay-hint">
103 <style>
104 .inlay-hint {{
105 color: color(var(--foreground) alpha(0.6));
106 background-color: color(var(--foreground) alpha(0.08));
107 border-radius: 4px;
108 padding: 0.05em 4px;
109 font-size: 0.9em;
110 font-family: {font};
111 }}
112
113 .inlay-hint a {{
114 color: color(var(--foreground) alpha(0.6));
115 text-decoration: none;
116 }}
117 </style>
118 <div class="inlay-hint">
119 {label}
120 </div>
121 </body>
122 """.format(
123 font=font,
124 label=label
125 )
126 return html
127
128
129 def format_inlay_hint_tooltip(tooltip: Optional[Union[str, MarkupContent]]) -> str:
130 if isinstance(tooltip, str):
131 return html.escape(tooltip)
132 if isinstance(tooltip, dict): # MarkupContent
133 return html.escape(tooltip.get('value') or "")
134 return ""
135
136
137 def format_inlay_hint_label(inlay_hint: InlayHint, session: Session, phantom_uuid: str) -> str:
138 tooltip = format_inlay_hint_tooltip(inlay_hint.get("tooltip"))
139 result = ""
140 can_resolve_inlay_hint = session.has_capability('inlayHintProvider.resolveProvider')
141 label = inlay_hint['label']
142 has_text_edits = bool(inlay_hint.get('textEdits'))
143 is_clickable = has_text_edits or can_resolve_inlay_hint
144 if isinstance(label, str):
145 if is_clickable:
146 inlay_hint_click_command = sublime.command_url('lsp_on_double_click', {
147 'command': 'lsp_inlay_hint_click',
148 'args': {
149 'session_name': session.config.name,
150 'inlay_hint': cast(dict, inlay_hint),
151 'phantom_uuid': phantom_uuid
152 }
153 })
154 result += '<a href="{command}">'.format(command=inlay_hint_click_command)
155 instruction_text = '\nDouble-click to insert' if has_text_edits else ""
156 result += '<span title="{tooltip}">{value}</span>'.format(
157 tooltip=(tooltip + instruction_text).strip(),
158 value=html.escape(label)
159 )
160 if is_clickable:
161 result += "</a>"
162 return result
163
164 for label_part in label:
165 value = ""
166 tooltip = format_inlay_hint_tooltip(label_part.get("tooltip"))
167 has_command = bool(label_part.get('command'))
168 if has_command:
169 inlay_hint_click_command = sublime.command_url('lsp_on_double_click', {
170 'command': 'lsp_inlay_hint_click',
171 'args': {
172 'session_name': session.config.name,
173 'inlay_hint': cast(dict, inlay_hint),
174 'phantom_uuid': phantom_uuid,
175 'label_part': cast(dict, label_part)
176 }
177 })
178 value += '<a href="{command}">'.format(command=inlay_hint_click_command)
179 value += html.escape(label_part['value'])
180 if has_command:
181 value += "</a>"
182 # InlayHintLabelPart.location is not supported
183 instruction_text = '\nDouble-click to execute' if has_command else ""
184 result += "<span title=\"{tooltip}\">{value}</span>".format(
185 tooltip=(tooltip + instruction_text).strip(),
186 value=value
187 )
188 return result
189
[end of plugin/inlay_hint.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/plugin/core/css.py b/plugin/core/css.py
--- a/plugin/core/css.py
+++ b/plugin/core/css.py
@@ -10,6 +10,7 @@
self.notification_classname = "notification"
self.sheets = sublime.load_resource("Packages/LSP/sheets.css")
self.sheets_classname = "lsp_sheet"
+ self.inlay_hints = sublime.load_resource("Packages/LSP/inlay_hints.css")
_css = None # type: Optional[CSS]
diff --git a/plugin/inlay_hint.py b/plugin/inlay_hint.py
--- a/plugin/inlay_hint.py
+++ b/plugin/inlay_hint.py
@@ -1,3 +1,4 @@
+from .core.css import css
from .core.protocol import InlayHint
from .core.protocol import InlayHintLabelPart
from .core.protocol import MarkupContent
@@ -102,18 +103,9 @@
<body id="lsp-inlay-hint">
<style>
.inlay-hint {{
- color: color(var(--foreground) alpha(0.6));
- background-color: color(var(--foreground) alpha(0.08));
- border-radius: 4px;
- padding: 0.05em 4px;
- font-size: 0.9em;
font-family: {font};
}}
-
- .inlay-hint a {{
- color: color(var(--foreground) alpha(0.6));
- text-decoration: none;
- }}
+ {css}
</style>
<div class="inlay-hint">
{label}
@@ -121,6 +113,7 @@
</body>
""".format(
font=font,
+ css=css().inlay_hints,
label=label
)
return html
| {"golden_diff": "diff --git a/plugin/core/css.py b/plugin/core/css.py\n--- a/plugin/core/css.py\n+++ b/plugin/core/css.py\n@@ -10,6 +10,7 @@\n self.notification_classname = \"notification\"\n self.sheets = sublime.load_resource(\"Packages/LSP/sheets.css\")\n self.sheets_classname = \"lsp_sheet\"\n+ self.inlay_hints = sublime.load_resource(\"Packages/LSP/inlay_hints.css\")\n \n \n _css = None # type: Optional[CSS]\ndiff --git a/plugin/inlay_hint.py b/plugin/inlay_hint.py\n--- a/plugin/inlay_hint.py\n+++ b/plugin/inlay_hint.py\n@@ -1,3 +1,4 @@\n+from .core.css import css\n from .core.protocol import InlayHint\n from .core.protocol import InlayHintLabelPart\n from .core.protocol import MarkupContent\n@@ -102,18 +103,9 @@\n <body id=\"lsp-inlay-hint\">\n <style>\n .inlay-hint {{\n- color: color(var(--foreground) alpha(0.6));\n- background-color: color(var(--foreground) alpha(0.08));\n- border-radius: 4px;\n- padding: 0.05em 4px;\n- font-size: 0.9em;\n font-family: {font};\n }}\n-\n- .inlay-hint a {{\n- color: color(var(--foreground) alpha(0.6));\n- text-decoration: none;\n- }}\n+ {css}\n </style>\n <div class=\"inlay-hint\">\n {label}\n@@ -121,6 +113,7 @@\n </body>\n \"\"\".format(\n font=font,\n+ css=css().inlay_hints,\n label=label\n )\n return html\n", "issue": "Settings for editing inlay hints\n**Is your feature request related to a problem? Please describe.**\r\nI wish to style the inlay hints that LSP provides, however, LSP only allows for very limited styling, basically background color, font type, font color, but nothing else. There are these ugly round borders on the little hint that cannot be removed without editing inlay_hint.py directly to change the enforced CSS.\r\n\r\n**Describe the solution you'd like**\r\nThe ability to either change all CSS values of the inlay hint though variables, or the ability to outright overwrite the css that's currently there from a settings file.\r\n\r\n**Describe alternatives you've considered**\r\nEditing LSP with PackageResourceViewer by hand each time there is an update.\r\n\r\n**Additional context**\r\nHere is my cute little CSS improvement for my own enjoyment, no more distracting button-like border :<)\r\n\r\n\r\n\n", "before_files": [{"content": "import sublime\nfrom .typing import Optional\n\n\nclass CSS:\n def __init__(self) -> None:\n self.popups = sublime.load_resource(\"Packages/LSP/popups.css\")\n self.popups_classname = \"lsp_popup\"\n self.notification = sublime.load_resource(\"Packages/LSP/notification.css\")\n self.notification_classname = \"notification\"\n self.sheets = sublime.load_resource(\"Packages/LSP/sheets.css\")\n self.sheets_classname = \"lsp_sheet\"\n\n\n_css = None # type: Optional[CSS]\n\n\ndef load() -> None:\n global _css\n _css = CSS()\n\n\ndef css() -> CSS:\n global _css\n assert _css is not None\n return _css\n", "path": "plugin/core/css.py"}, {"content": "from .core.protocol import InlayHint\nfrom .core.protocol import InlayHintLabelPart\nfrom .core.protocol import MarkupContent\nfrom .core.protocol import Point\nfrom .core.protocol import Request\nfrom .core.registry import LspTextCommand\nfrom .core.registry import LspWindowCommand\nfrom .core.sessions import Session\nfrom .core.settings import userprefs\nfrom .core.typing import cast, Optional, Union\nfrom .core.views import point_to_offset\nfrom .formatting import apply_text_edits_to_view\nimport html\nimport sublime\nimport uuid\n\n\nclass LspToggleInlayHintsCommand(LspWindowCommand):\n capability = 'inlayHintProvider'\n\n def run(self, enable: 
Optional[bool] = None) -> None:\n if not isinstance(enable, bool):\n enable = not self.are_enabled(self.window)\n self.window.settings().set('lsp_show_inlay_hints', enable)\n status = 'on' if enable else 'off'\n sublime.status_message('Inlay Hints are {}'.format(status))\n for session in self.sessions():\n for sv in session.session_views_async():\n sv.session_buffer.do_inlay_hints_async(sv.view)\n\n def is_checked(self) -> bool:\n return self.are_enabled(self.window)\n\n @classmethod\n def are_enabled(cls, window: Optional[sublime.Window]) -> bool:\n if not window:\n return userprefs().show_inlay_hints\n return bool(window.settings().get('lsp_show_inlay_hints', userprefs().show_inlay_hints))\n\n\nclass LspInlayHintClickCommand(LspTextCommand):\n capability = 'inlayHintProvider'\n\n def run(self, _edit: sublime.Edit, session_name: str, inlay_hint: InlayHint, phantom_uuid: str,\n event: Optional[dict] = None, label_part: Optional[InlayHintLabelPart] = None) -> None:\n # Insert textEdits for the given inlay hint.\n # If a InlayHintLabelPart was clicked, label_part will be passed as an argument to the LspInlayHintClickCommand\n # and InlayHintLabelPart.command will be executed.\n session = self.session_by_name(session_name, 'inlayHintProvider')\n if session and session.has_capability('inlayHintProvider.resolveProvider'):\n request = Request.resolveInlayHint(inlay_hint, self.view)\n session.send_request_async(\n request,\n lambda response: self.handle(session_name, response, phantom_uuid, label_part))\n return\n self.handle(session_name, inlay_hint, phantom_uuid, label_part)\n\n def handle(self, session_name: str, inlay_hint: InlayHint, phantom_uuid: str,\n label_part: Optional[InlayHintLabelPart] = None) -> None:\n self.handle_inlay_hint_text_edits(session_name, inlay_hint, phantom_uuid)\n self.handle_label_part_command(session_name, label_part)\n\n def handle_inlay_hint_text_edits(self, session_name: str, inlay_hint: InlayHint, phantom_uuid: str) -> None:\n session = self.session_by_name(session_name, 'inlayHintProvider')\n if not session:\n return\n text_edits = inlay_hint.get('textEdits')\n if not text_edits:\n return\n for sb in session.session_buffers_async():\n sb.remove_inlay_hint_phantom(phantom_uuid)\n apply_text_edits_to_view(text_edits, self.view)\n\n def handle_label_part_command(self, session_name: str, label_part: Optional[InlayHintLabelPart] = None) -> None:\n if not label_part:\n return\n command = label_part.get('command')\n if not command:\n return\n args = {\n \"session_name\": session_name,\n \"command_name\": command[\"command\"],\n \"command_args\": command.get(\"arguments\")\n }\n self.view.run_command(\"lsp_execute\", args)\n\n\ndef inlay_hint_to_phantom(view: sublime.View, inlay_hint: InlayHint, session: Session) -> sublime.Phantom:\n position = inlay_hint[\"position\"]\n region = sublime.Region(point_to_offset(Point.from_lsp(position), view))\n phantom_uuid = str(uuid.uuid4())\n content = get_inlay_hint_html(view, inlay_hint, session, phantom_uuid)\n p = sublime.Phantom(region, content, sublime.LAYOUT_INLINE)\n setattr(p, 'lsp_uuid', phantom_uuid)\n return p\n\n\ndef get_inlay_hint_html(view: sublime.View, inlay_hint: InlayHint, session: Session, phantom_uuid: str) -> str:\n label = format_inlay_hint_label(inlay_hint, session, phantom_uuid)\n font = view.settings().get('font_face') or \"monospace\"\n html = \"\"\"\n <body id=\"lsp-inlay-hint\">\n <style>\n .inlay-hint {{\n color: color(var(--foreground) alpha(0.6));\n background-color: color(var(--foreground) 
alpha(0.08));\n border-radius: 4px;\n padding: 0.05em 4px;\n font-size: 0.9em;\n font-family: {font};\n }}\n\n .inlay-hint a {{\n color: color(var(--foreground) alpha(0.6));\n text-decoration: none;\n }}\n </style>\n <div class=\"inlay-hint\">\n {label}\n </div>\n </body>\n \"\"\".format(\n font=font,\n label=label\n )\n return html\n\n\ndef format_inlay_hint_tooltip(tooltip: Optional[Union[str, MarkupContent]]) -> str:\n if isinstance(tooltip, str):\n return html.escape(tooltip)\n if isinstance(tooltip, dict): # MarkupContent\n return html.escape(tooltip.get('value') or \"\")\n return \"\"\n\n\ndef format_inlay_hint_label(inlay_hint: InlayHint, session: Session, phantom_uuid: str) -> str:\n tooltip = format_inlay_hint_tooltip(inlay_hint.get(\"tooltip\"))\n result = \"\"\n can_resolve_inlay_hint = session.has_capability('inlayHintProvider.resolveProvider')\n label = inlay_hint['label']\n has_text_edits = bool(inlay_hint.get('textEdits'))\n is_clickable = has_text_edits or can_resolve_inlay_hint\n if isinstance(label, str):\n if is_clickable:\n inlay_hint_click_command = sublime.command_url('lsp_on_double_click', {\n 'command': 'lsp_inlay_hint_click',\n 'args': {\n 'session_name': session.config.name,\n 'inlay_hint': cast(dict, inlay_hint),\n 'phantom_uuid': phantom_uuid\n }\n })\n result += '<a href=\"{command}\">'.format(command=inlay_hint_click_command)\n instruction_text = '\\nDouble-click to insert' if has_text_edits else \"\"\n result += '<span title=\"{tooltip}\">{value}</span>'.format(\n tooltip=(tooltip + instruction_text).strip(),\n value=html.escape(label)\n )\n if is_clickable:\n result += \"</a>\"\n return result\n\n for label_part in label:\n value = \"\"\n tooltip = format_inlay_hint_tooltip(label_part.get(\"tooltip\"))\n has_command = bool(label_part.get('command'))\n if has_command:\n inlay_hint_click_command = sublime.command_url('lsp_on_double_click', {\n 'command': 'lsp_inlay_hint_click',\n 'args': {\n 'session_name': session.config.name,\n 'inlay_hint': cast(dict, inlay_hint),\n 'phantom_uuid': phantom_uuid,\n 'label_part': cast(dict, label_part)\n }\n })\n value += '<a href=\"{command}\">'.format(command=inlay_hint_click_command)\n value += html.escape(label_part['value'])\n if has_command:\n value += \"</a>\"\n # InlayHintLabelPart.location is not supported\n instruction_text = '\\nDouble-click to execute' if has_command else \"\"\n result += \"<span title=\\\"{tooltip}\\\">{value}</span>\".format(\n tooltip=(tooltip + instruction_text).strip(),\n value=value\n )\n return result\n", "path": "plugin/inlay_hint.py"}]} | 3,231 | 408 |
gh_patches_debug_25097 | rasdani/github-patches | git_diff | python-gitlab__python-gitlab-1628 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
'project-runner get' command should not exist
## Description of the problem, including code/CLI snippet
The library allows querying the `/projects/:projectId/runners/:id` URL through the `gitlab.v4.objects.ProjectRunnerManager` class, but the official Gitlab API does not describe this endpoint, which does not exist.
Project's runners list:
```bash
$ gitlab -d -o json project-runner list --all --project-id 76644
DEBUG:urllib3.connectionpool:https://gitlab.company.com:443 "GET /api/v4/projects/76644/runners?project_id=76644 HTTP/1.1" 200 None
send: b'GET /api/v4/projects/76644/runners?project_id=76644 HTTP/1.1\r\nHost: gitlab.company.com\r\nUser-Agent: python-gitlab/2.10.1\r\nAccept-Encoding: gzip, deflate\r\nAccept: */*\r\nConnection: keep-alive\r\nPRIVATE-TOKEN: [hidden content]\r\nContent-type: application/json\r\n\r\n'
reply: 'HTTP/1.1 200 OK\r\n'
header: Server: nginx
header: Date: Wed, 22 Sep 2021 18:08:59 GMT
header: Content-Type: application/json
header: Transfer-Encoding: chunked
header: Connection: keep-alive
header: Vary: Accept-Encoding
header: Cache-Control: max-age=0, private, must-revalidate
header: Etag: W/"1ae5eff0bd18cd69fc1aa3325e259422"
header: Link: [hidden content]
header: Vary: Origin
header: X-Content-Type-Options: nosniff
header: X-Frame-Options: SAMEORIGIN
header: X-Gitlab-Feature-Category: continuous_integration
header: X-Next-Page:
header: X-Page: 1
header: X-Per-Page: 20
header: X-Prev-Page:
header: X-Request-Id: 01FG7B4MD17BPMDZXG2XHFFK4Q
header: X-Runtime: 0.072099
header: X-Total: 2
header: X-Total-Pages: 1
header: Strict-Transport-Security: max-age=31536000
header: Referrer-Policy: strict-origin-when-cross-origin
header: Content-Encoding: gzip
[{"id": 7723, "description": "<description>", "ip_address": "<ip address>", "active": true, "is_shared": false, "name": "gitlab-runner", "online": false, "status": "offline"}, {"id": 11737, "description": "shared-gitlab-runner-77d49f579-zpr98", "ip_address": "<ip address>", "active": true, "is_shared": true, "name": "gitlab-runner", "online": true, "status": "online"}]
```
Invalid query:
```bash
$ gitlab -d -o json project-runner get --project-id 76644 --id 7723
DEBUG:urllib3.connectionpool:https://gitlab.company.com:443 "GET /api/v4/projects/76644/runners/7723?project_id=76644 HTTP/1.1" 404 25
Impossible to get object (404: 404 Not Found)
send: b'GET /api/v4/projects/76644/runners/7723?project_id=76644 HTTP/1.1\r\nHost: gitlab.company.com\r\nUser-Agent: python-gitlab/2.10.1\r\nAccept-Encoding: gzip, deflate\r\nAccept: */*\r\nConnection: keep-alive\r\nPRIVATE-TOKEN: [hidden content]\r\nContent-type: application/json\r\n\r\n'
reply: 'HTTP/1.1 404 Not Found\r\n'
header: Server: nginx
header: Date: Wed, 22 Sep 2021 18:11:44 GMT
header: Content-Type: application/json
header: Content-Length: 25
header: Connection: keep-alive
header: Cache-Control: no-cache
header: Vary: Origin
header: X-Content-Type-Options: nosniff
header: X-Frame-Options: SAMEORIGIN
header: X-Gitlab-Feature-Category: not_owned
header: X-Request-Id: 01FG7B9MZ0K19YPZ4MQJ390PK1
header: X-Runtime: 0.019269
```
Valid query (which already exists): GET /runner/7723
```bash
$ gitlab -d -o json runner get --id 7723
DEBUG:urllib3.connectionpool:https://gitlab.company.com:443 "GET /api/v4/runners/7723 HTTP/1.1" 200 None
send: b'GET /api/v4/runners/7723 HTTP/1.1\r\nHost: gitlab.company.com\r\nUser-Agent: python-gitlab/2.10.1\r\nAccept-Encoding: gzip, deflate\r\nAccept: */*\r\nConnection: keep-alive\r\nPRIVATE-TOKEN: [hidden content]\r\nContent-type: application/json\r\n\r\n'
reply: 'HTTP/1.1 200 OK\r\n'
header: Server: nginx
header: Date: Wed, 22 Sep 2021 18:11:34 GMT
header: Content-Type: application/json
header: Transfer-Encoding: chunked
header: Connection: keep-alive
header: Vary: Accept-Encoding
header: Cache-Control: max-age=0, private, must-revalidate
header: Etag: W/"7506c48834a201e1d86531647d32b044"
header: Vary: Origin
header: X-Content-Type-Options: nosniff
header: X-Frame-Options: SAMEORIGIN
header: X-Gitlab-Feature-Category: continuous_integration
header: X-Request-Id: 01FG7B9B43ME9BMFFZTFM8A20H
header: X-Runtime: 0.124597
header: Strict-Transport-Security: max-age=31536000
header: Referrer-Policy: strict-origin-when-cross-origin
header: Content-Encoding: gzip
{"id": 7723, "description": "<description>", "ip_address": "<id address>", "active": true, "is_shared": false, "name": "gitlab-runner", "online": false, "status": "offline", "tag_list": [], "run_untagged": true, "locked": false, "maximum_timeout": null, "access_level": "not_protected", "version": "13.11.0", "revision": "7f7a4bb0", "platform": "linux", "architecture": "amd64", "contacted_at": "2021-04-23T13:03:09.789Z", "projects": [projects list], "groups": []}
```
If I correctly understand the library, I think that the `ProjectRunnerManager` class should not inherit the `GetMixin` class (which comes in through `NoUpdateMixin`), but only the `ListMixin`, `CreateMixin` and `DeleteMixin` classes (and the `RESTManager` class, of course).
The unit tests do not cover the `project-runner get` command (this could have revealed the bug earlier).
Should I open a PR to fix this issue?
## Expected Behavior
The `gitlab projet-runner get` command should not exist.
## Actual Behavior
The `gitlab projet-runner get` command exists.
## Specifications
- python-gitlab version: 2.10.1
- API version you are using (v3/v4): v4
- Gitlab server version (or gitlab.com): 13.12.1-ee
</issue>
<code>
[start of gitlab/v4/objects/runners.py]
1 from gitlab import cli
2 from gitlab import exceptions as exc
3 from gitlab import types
4 from gitlab.base import RequiredOptional, RESTManager, RESTObject
5 from gitlab.mixins import (
6 CRUDMixin,
7 ListMixin,
8 NoUpdateMixin,
9 ObjectDeleteMixin,
10 SaveMixin,
11 )
12
13 __all__ = [
14 "RunnerJob",
15 "RunnerJobManager",
16 "Runner",
17 "RunnerManager",
18 "GroupRunner",
19 "GroupRunnerManager",
20 "ProjectRunner",
21 "ProjectRunnerManager",
22 ]
23
24
25 class RunnerJob(RESTObject):
26 pass
27
28
29 class RunnerJobManager(ListMixin, RESTManager):
30 _path = "/runners/%(runner_id)s/jobs"
31 _obj_cls = RunnerJob
32 _from_parent_attrs = {"runner_id": "id"}
33 _list_filters = ("status",)
34
35
36 class Runner(SaveMixin, ObjectDeleteMixin, RESTObject):
37 jobs: RunnerJobManager
38
39
40 class RunnerManager(CRUDMixin, RESTManager):
41 _path = "/runners"
42 _obj_cls = Runner
43 _create_attrs = RequiredOptional(
44 required=("token",),
45 optional=(
46 "description",
47 "info",
48 "active",
49 "locked",
50 "run_untagged",
51 "tag_list",
52 "access_level",
53 "maximum_timeout",
54 ),
55 )
56 _update_attrs = RequiredOptional(
57 optional=(
58 "description",
59 "active",
60 "tag_list",
61 "run_untagged",
62 "locked",
63 "access_level",
64 "maximum_timeout",
65 ),
66 )
67 _list_filters = ("scope", "tag_list")
68 _types = {"tag_list": types.ListAttribute}
69
70 @cli.register_custom_action("RunnerManager", tuple(), ("scope",))
71 @exc.on_http_error(exc.GitlabListError)
72 def all(self, scope=None, **kwargs):
73 """List all the runners.
74
75 Args:
76 scope (str): The scope of runners to show, one of: specific,
77 shared, active, paused, online
78 all (bool): If True, return all the items, without pagination
79 per_page (int): Number of items to retrieve per request
80 page (int): ID of the page to return (starts with page 1)
81 as_list (bool): If set to False and no pagination option is
82 defined, return a generator instead of a list
83 **kwargs: Extra options to send to the server (e.g. sudo)
84
85 Raises:
86 GitlabAuthenticationError: If authentication is not correct
87 GitlabListError: If the server failed to perform the request
88
89 Returns:
90 list(Runner): a list of runners matching the scope.
91 """
92 path = "/runners/all"
93 query_data = {}
94 if scope is not None:
95 query_data["scope"] = scope
96 obj = self.gitlab.http_list(path, query_data, **kwargs)
97 return [self._obj_cls(self, item) for item in obj]
98
99 @cli.register_custom_action("RunnerManager", ("token",))
100 @exc.on_http_error(exc.GitlabVerifyError)
101 def verify(self, token, **kwargs):
102 """Validates authentication credentials for a registered Runner.
103
104 Args:
105 token (str): The runner's authentication token
106 **kwargs: Extra options to send to the server (e.g. sudo)
107
108 Raises:
109 GitlabAuthenticationError: If authentication is not correct
110 GitlabVerifyError: If the server failed to verify the token
111 """
112 path = "/runners/verify"
113 post_data = {"token": token}
114 self.gitlab.http_post(path, post_data=post_data, **kwargs)
115
116
117 class GroupRunner(ObjectDeleteMixin, RESTObject):
118 pass
119
120
121 class GroupRunnerManager(NoUpdateMixin, RESTManager):
122 _path = "/groups/%(group_id)s/runners"
123 _obj_cls = GroupRunner
124 _from_parent_attrs = {"group_id": "id"}
125 _create_attrs = RequiredOptional(required=("runner_id",))
126 _list_filters = ("scope", "tag_list")
127 _types = {"tag_list": types.ListAttribute}
128
129
130 class ProjectRunner(ObjectDeleteMixin, RESTObject):
131 pass
132
133
134 class ProjectRunnerManager(NoUpdateMixin, RESTManager):
135 _path = "/projects/%(project_id)s/runners"
136 _obj_cls = ProjectRunner
137 _from_parent_attrs = {"project_id": "id"}
138 _create_attrs = RequiredOptional(required=("runner_id",))
139 _list_filters = ("scope", "tag_list")
140 _types = {"tag_list": types.ListAttribute}
141
[end of gitlab/v4/objects/runners.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gitlab/v4/objects/runners.py b/gitlab/v4/objects/runners.py
--- a/gitlab/v4/objects/runners.py
+++ b/gitlab/v4/objects/runners.py
@@ -3,9 +3,10 @@
from gitlab import types
from gitlab.base import RequiredOptional, RESTManager, RESTObject
from gitlab.mixins import (
+ CreateMixin,
CRUDMixin,
+ DeleteMixin,
ListMixin,
- NoUpdateMixin,
ObjectDeleteMixin,
SaveMixin,
)
@@ -114,11 +115,11 @@
self.gitlab.http_post(path, post_data=post_data, **kwargs)
-class GroupRunner(ObjectDeleteMixin, RESTObject):
+class GroupRunner(RESTObject):
pass
-class GroupRunnerManager(NoUpdateMixin, RESTManager):
+class GroupRunnerManager(ListMixin, RESTManager):
_path = "/groups/%(group_id)s/runners"
_obj_cls = GroupRunner
_from_parent_attrs = {"group_id": "id"}
@@ -131,7 +132,7 @@
pass
-class ProjectRunnerManager(NoUpdateMixin, RESTManager):
+class ProjectRunnerManager(CreateMixin, DeleteMixin, ListMixin, RESTManager):
_path = "/projects/%(project_id)s/runners"
_obj_cls = ProjectRunner
_from_parent_attrs = {"project_id": "id"}
| {"golden_diff": "diff --git a/gitlab/v4/objects/runners.py b/gitlab/v4/objects/runners.py\n--- a/gitlab/v4/objects/runners.py\n+++ b/gitlab/v4/objects/runners.py\n@@ -3,9 +3,10 @@\n from gitlab import types\n from gitlab.base import RequiredOptional, RESTManager, RESTObject\n from gitlab.mixins import (\n+ CreateMixin,\n CRUDMixin,\n+ DeleteMixin,\n ListMixin,\n- NoUpdateMixin,\n ObjectDeleteMixin,\n SaveMixin,\n )\n@@ -114,11 +115,11 @@\n self.gitlab.http_post(path, post_data=post_data, **kwargs)\n \n \n-class GroupRunner(ObjectDeleteMixin, RESTObject):\n+class GroupRunner(RESTObject):\n pass\n \n \n-class GroupRunnerManager(NoUpdateMixin, RESTManager):\n+class GroupRunnerManager(ListMixin, RESTManager):\n _path = \"/groups/%(group_id)s/runners\"\n _obj_cls = GroupRunner\n _from_parent_attrs = {\"group_id\": \"id\"}\n@@ -131,7 +132,7 @@\n pass\n \n \n-class ProjectRunnerManager(NoUpdateMixin, RESTManager):\n+class ProjectRunnerManager(CreateMixin, DeleteMixin, ListMixin, RESTManager):\n _path = \"/projects/%(project_id)s/runners\"\n _obj_cls = ProjectRunner\n _from_parent_attrs = {\"project_id\": \"id\"}\n", "issue": "'project-runner get' command should not exist\n## Description of the problem, including code/CLI snippet\r\nThe library allows to query `/projects/:projectId/runners/:id` URL through `gitlab.v4.objects.ProjectRunnerManager` class, but the official Gitlab API does not describe this endpoint, which does not exists.\r\n\r\nProject's runners list :\r\n```bash\r\n$ gitlab -d -o json project-runner list --all --project-id 76644\r\nDEBUG:urllib3.connectionpool:https://gitlab.company.com:443 \"GET /api/v4/projects/76644/runners?project_id=76644 HTTP/1.1\" 200 None\r\nsend: b'GET /api/v4/projects/76644/runners?project_id=76644 HTTP/1.1\\r\\nHost: gitlab.company.com\\r\\nUser-Agent: python-gitlab/2.10.1\\r\\nAccept-Encoding: gzip, deflate\\r\\nAccept: */*\\r\\nConnection: keep-alive\\r\\nPRIVATE-TOKEN: [hidden content]\\r\\nContent-type: application/json\\r\\n\\r\\n'\r\nreply: 'HTTP/1.1 200 OK\\r\\n'\r\nheader: Server: nginx\r\nheader: Date: Wed, 22 Sep 2021 18:08:59 GMT\r\nheader: Content-Type: application/json\r\nheader: Transfer-Encoding: chunked\r\nheader: Connection: keep-alive\r\nheader: Vary: Accept-Encoding\r\nheader: Cache-Control: max-age=0, private, must-revalidate\r\nheader: Etag: W/\"1ae5eff0bd18cd69fc1aa3325e259422\"\r\nheader: Link: [hidden content]\r\nheader: Vary: Origin\r\nheader: X-Content-Type-Options: nosniff\r\nheader: X-Frame-Options: SAMEORIGIN\r\nheader: X-Gitlab-Feature-Category: continuous_integration\r\nheader: X-Next-Page:\r\nheader: X-Page: 1\r\nheader: X-Per-Page: 20\r\nheader: X-Prev-Page:\r\nheader: X-Request-Id: 01FG7B4MD17BPMDZXG2XHFFK4Q\r\nheader: X-Runtime: 0.072099\r\nheader: X-Total: 2\r\nheader: X-Total-Pages: 1\r\nheader: Strict-Transport-Security: max-age=31536000\r\nheader: Referrer-Policy: strict-origin-when-cross-origin\r\nheader: Content-Encoding: gzip\r\n[{\"id\": 7723, \"description\": \"<description>\", \"ip_address\": \"<ip address>\", \"active\": true, \"is_shared\": false, \"name\": \"gitlab-runner\", \"online\": false, \"status\": \"offline\"}, {\"id\": 11737, \"description\": \"shared-gitlab-runner-77d49f579-zpr98\", \"ip_address\": \"<ip address>\", \"active\": true, \"is_shared\": true, \"name\": \"gitlab-runner\", \"online\": true, \"status\": \"online\"}]\r\n```\r\n\r\nInvalid query :\r\n```bash\r\n$ gitlab -d -o json project-runner get --project-id 76644 --id 
7723\r\nDEBUG:urllib3.connectionpool:https://gitlab.company.com:443 \"GET /api/v4/projects/76644/runners/7723?project_id=76644 HTTP/1.1\" 404 25\r\nImpossible to get object (404: 404 Not Found)\r\nsend: b'GET /api/v4/projects/76644/runners/7723?project_id=76644 HTTP/1.1\\r\\nHost: gitlab.company.com\\r\\nUser-Agent: python-gitlab/2.10.1\\r\\nAccept-Encoding: gzip, deflate\\r\\nAccept: */*\\r\\nConnection: keep-alive\\r\\nPRIVATE-TOKEN: [hidden content]\\r\\nContent-type: application/json\\r\\n\\r\\n'\r\nreply: 'HTTP/1.1 404 Not Found\\r\\n'\r\nheader: Server: nginx\r\nheader: Date: Wed, 22 Sep 2021 18:11:44 GMT\r\nheader: Content-Type: application/json\r\nheader: Content-Length: 25\r\nheader: Connection: keep-alive\r\nheader: Cache-Control: no-cache\r\nheader: Vary: Origin\r\nheader: X-Content-Type-Options: nosniff\r\nheader: X-Frame-Options: SAMEORIGIN\r\nheader: X-Gitlab-Feature-Category: not_owned\r\nheader: X-Request-Id: 01FG7B9MZ0K19YPZ4MQJ390PK1\r\nheader: X-Runtime: 0.019269\r\n```\r\n\r\nValid query (which already exists) : GET /runner/7723\r\n```bash\r\n$ gitlab -d -o json runner get --id 7723\r\nDEBUG:urllib3.connectionpool:https://gitlab.company.com:443 \"GET /api/v4/runners/7723 HTTP/1.1\" 200 None\r\nsend: b'GET /api/v4/runners/7723 HTTP/1.1\\r\\nHost: gitlab.company.com\\r\\nUser-Agent: python-gitlab/2.10.1\\r\\nAccept-Encoding: gzip, deflate\\r\\nAccept: */*\\r\\nConnection: keep-alive\\r\\nPRIVATE-TOKEN: [hidden content]\\r\\nContent-type: application/json\\r\\n\\r\\n'\r\nreply: 'HTTP/1.1 200 OK\\r\\n'\r\nheader: Server: nginx\r\nheader: Date: Wed, 22 Sep 2021 18:11:34 GMT\r\nheader: Content-Type: application/json\r\nheader: Transfer-Encoding: chunked\r\nheader: Connection: keep-alive\r\nheader: Vary: Accept-Encoding\r\nheader: Cache-Control: max-age=0, private, must-revalidate\r\nheader: Etag: W/\"7506c48834a201e1d86531647d32b044\"\r\nheader: Vary: Origin\r\nheader: X-Content-Type-Options: nosniff\r\nheader: X-Frame-Options: SAMEORIGIN\r\nheader: X-Gitlab-Feature-Category: continuous_integration\r\nheader: X-Request-Id: 01FG7B9B43ME9BMFFZTFM8A20H\r\nheader: X-Runtime: 0.124597\r\nheader: Strict-Transport-Security: max-age=31536000\r\nheader: Referrer-Policy: strict-origin-when-cross-origin\r\nheader: Content-Encoding: gzip\r\n{\"id\": 7723, \"description\": \"<description>\", \"ip_address\": \"<id address>\", \"active\": true, \"is_shared\": false, \"name\": \"gitlab-runner\", \"online\": false, \"status\": \"offline\", \"tag_list\": [], \"run_untagged\": true, \"locked\": false, \"maximum_timeout\": null, \"access_level\": \"not_protected\", \"version\": \"13.11.0\", \"revision\": \"7f7a4bb0\", \"platform\": \"linux\", \"architecture\": \"amd64\", \"contacted_at\": \"2021-04-23T13:03:09.789Z\", \"projects\": [projects list], \"groups\": []}\r\n```\r\n\r\nIf I correctly understand the library, I think that `ProjectRunnerManager` class should not inherits `GetMixin` class (which comes through `NoUpdateMixin`), but only `ListMixin`, `CreateMixin` and `DeleteMixin` classes (and `RESTManager` class, of course).\r\n\r\nThe unit tests do not cover the `project-runner get` command (this could have revelead the bug earlier).\r\n\r\nShould I open a PR to fix this issue ?\r\n\r\n## Expected Behavior\r\nThe `gitlab projet-runner get` command should not exist.\r\n\r\n## Actual Behavior\r\nThe `gitlab projet-runner get` command exists.\r\n\r\n## Specifications\r\n\r\n - python-gitlab version: 2.10.1\r\n - API version you are using (v3/v4): v4\r\n - Gitlab server version (or 
gitlab.com): 13.12.1-ee\r\n\n", "before_files": [{"content": "from gitlab import cli\nfrom gitlab import exceptions as exc\nfrom gitlab import types\nfrom gitlab.base import RequiredOptional, RESTManager, RESTObject\nfrom gitlab.mixins import (\n CRUDMixin,\n ListMixin,\n NoUpdateMixin,\n ObjectDeleteMixin,\n SaveMixin,\n)\n\n__all__ = [\n \"RunnerJob\",\n \"RunnerJobManager\",\n \"Runner\",\n \"RunnerManager\",\n \"GroupRunner\",\n \"GroupRunnerManager\",\n \"ProjectRunner\",\n \"ProjectRunnerManager\",\n]\n\n\nclass RunnerJob(RESTObject):\n pass\n\n\nclass RunnerJobManager(ListMixin, RESTManager):\n _path = \"/runners/%(runner_id)s/jobs\"\n _obj_cls = RunnerJob\n _from_parent_attrs = {\"runner_id\": \"id\"}\n _list_filters = (\"status\",)\n\n\nclass Runner(SaveMixin, ObjectDeleteMixin, RESTObject):\n jobs: RunnerJobManager\n\n\nclass RunnerManager(CRUDMixin, RESTManager):\n _path = \"/runners\"\n _obj_cls = Runner\n _create_attrs = RequiredOptional(\n required=(\"token\",),\n optional=(\n \"description\",\n \"info\",\n \"active\",\n \"locked\",\n \"run_untagged\",\n \"tag_list\",\n \"access_level\",\n \"maximum_timeout\",\n ),\n )\n _update_attrs = RequiredOptional(\n optional=(\n \"description\",\n \"active\",\n \"tag_list\",\n \"run_untagged\",\n \"locked\",\n \"access_level\",\n \"maximum_timeout\",\n ),\n )\n _list_filters = (\"scope\", \"tag_list\")\n _types = {\"tag_list\": types.ListAttribute}\n\n @cli.register_custom_action(\"RunnerManager\", tuple(), (\"scope\",))\n @exc.on_http_error(exc.GitlabListError)\n def all(self, scope=None, **kwargs):\n \"\"\"List all the runners.\n\n Args:\n scope (str): The scope of runners to show, one of: specific,\n shared, active, paused, online\n all (bool): If True, return all the items, without pagination\n per_page (int): Number of items to retrieve per request\n page (int): ID of the page to return (starts with page 1)\n as_list (bool): If set to False and no pagination option is\n defined, return a generator instead of a list\n **kwargs: Extra options to send to the server (e.g. sudo)\n\n Raises:\n GitlabAuthenticationError: If authentication is not correct\n GitlabListError: If the server failed to perform the request\n\n Returns:\n list(Runner): a list of runners matching the scope.\n \"\"\"\n path = \"/runners/all\"\n query_data = {}\n if scope is not None:\n query_data[\"scope\"] = scope\n obj = self.gitlab.http_list(path, query_data, **kwargs)\n return [self._obj_cls(self, item) for item in obj]\n\n @cli.register_custom_action(\"RunnerManager\", (\"token\",))\n @exc.on_http_error(exc.GitlabVerifyError)\n def verify(self, token, **kwargs):\n \"\"\"Validates authentication credentials for a registered Runner.\n\n Args:\n token (str): The runner's authentication token\n **kwargs: Extra options to send to the server (e.g. 
sudo)\n\n Raises:\n GitlabAuthenticationError: If authentication is not correct\n GitlabVerifyError: If the server failed to verify the token\n \"\"\"\n path = \"/runners/verify\"\n post_data = {\"token\": token}\n self.gitlab.http_post(path, post_data=post_data, **kwargs)\n\n\nclass GroupRunner(ObjectDeleteMixin, RESTObject):\n pass\n\n\nclass GroupRunnerManager(NoUpdateMixin, RESTManager):\n _path = \"/groups/%(group_id)s/runners\"\n _obj_cls = GroupRunner\n _from_parent_attrs = {\"group_id\": \"id\"}\n _create_attrs = RequiredOptional(required=(\"runner_id\",))\n _list_filters = (\"scope\", \"tag_list\")\n _types = {\"tag_list\": types.ListAttribute}\n\n\nclass ProjectRunner(ObjectDeleteMixin, RESTObject):\n pass\n\n\nclass ProjectRunnerManager(NoUpdateMixin, RESTManager):\n _path = \"/projects/%(project_id)s/runners\"\n _obj_cls = ProjectRunner\n _from_parent_attrs = {\"project_id\": \"id\"}\n _create_attrs = RequiredOptional(required=(\"runner_id\",))\n _list_filters = (\"scope\", \"tag_list\")\n _types = {\"tag_list\": types.ListAttribute}\n", "path": "gitlab/v4/objects/runners.py"}]} | 3,744 | 315 |
gh_patches_debug_26912 | rasdani/github-patches | git_diff | digitalfabrik__integreat-cms-361 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Refactor technical feedback
The current implementation of technical feedback does not comply with the way it is used in the apps when submitting feedback. Instead of having one model for technical feedback, we should rather have a `technical` flag for each of the other feedback models. This flag should be set whenever the `category`-parameter contains "technisch" or "technical". In a future API version, we can convert this to a parameter, which can either be true or false in the feedback submission request.
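A rough sketch of what this could look like (field and helper names are only placeholders, assuming the flag lives on the base `Feedback` model):

```python
# Hypothetical sketch: a technical flag on the base Feedback model instead of
# a separate TechnicalFeedback model.
from django.db import models


class Feedback(models.Model):
    # ... existing emotion / comment / read-status fields stay unchanged ...
    is_technical = models.BooleanField(default=False)


def category_is_technical(category: str) -> bool:
    """Return True if the submitted category refers to technical feedback."""
    category = category.lower()
    return "technisch" in category or "technical" in category
```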
</issue>
<code>
[start of src/cms/models/__init__.py]
1 from .config.configuration import Configuration
2
3 from .events.event import Event
4 from .events.event_translation import EventTranslation
5 from .events.recurrence_rule import RecurrenceRule
6
7 from .offers.offer import Offer
8 from .offers.offer_template import OfferTemplate
9
10 from .feedback.event_feedback import EventFeedback
11 from .feedback.offer_feedback import OfferFeedback
12 from .feedback.feedback import Feedback
13 from .feedback.page_feedback import PageFeedback
14 from .feedback.region_feedback import RegionFeedback
15 from .feedback.search_result_feedback import SearchResultFeedback
16 from .feedback.technical_feedback import TechnicalFeedback
17
18 from .languages.language import Language
19 from .languages.language_tree_node import LanguageTreeNode
20
21 from .media.document import Document
22
23 from .pages.page import Page
24 from .pages.page_translation import PageTranslation
25
26 from .pois.poi import POI
27 from .pois.poi_translation import POITranslation
28
29 from .push_notifications.push_notification import PushNotification
30 from .push_notifications.push_notification_translation import PushNotificationTranslation
31
32 from .regions.region import Region
33
34 from .users.organization import Organization
35 from .users.user_profile import UserProfile
36 from .users.user_mfa import UserMfa
37
[end of src/cms/models/__init__.py]
[start of src/cms/models/feedback/feedback.py]
1 """
2 Module for models storing feedback from front end users
3 """
4 from django.db import models
5
6
7 class Feedback(models.Model):
8 """
9 Base class for collecting feeedback from users.
10 """
11 EMOTION = (
12 ("Pos", "Positive"),
13 ("Neg", "Negative"),
14 ("NA", "Not Available"),
15 )
16 emotion = models.CharField(max_length=3, choices=EMOTION)
17 comment = models.CharField(max_length=1000)
18 readStatus = models.BooleanField(default=False)
19
20 created_date = models.DateTimeField(auto_now_add=True)
21 last_updated = models.DateTimeField(auto_now=True)
22
23 class Meta:
24 default_permissions = ()
25 permissions = (
26 ('view_feedback', 'Can view feedback'),
27 )
28
[end of src/cms/models/feedback/feedback.py]
[start of src/cms/models/feedback/technical_feedback.py]
1 """
2 Module for models storing feedback from front end users
3 """
4 from django.db import models
5
6 from .feedback import Feedback
7 from ..pages.page import Page
8
9
10 class TechnicalFeedback(Feedback):
11 """
12 Technical feedback on the end user app
13 """
14 page = models.ForeignKey(Page, on_delete=models.CASCADE)
15
16 class Meta:
17 default_permissions = ()
18
[end of src/cms/models/feedback/technical_feedback.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cms/models/__init__.py b/src/cms/models/__init__.py
--- a/src/cms/models/__init__.py
+++ b/src/cms/models/__init__.py
@@ -13,7 +13,6 @@
from .feedback.page_feedback import PageFeedback
from .feedback.region_feedback import RegionFeedback
from .feedback.search_result_feedback import SearchResultFeedback
-from .feedback.technical_feedback import TechnicalFeedback
from .languages.language import Language
from .languages.language_tree_node import LanguageTreeNode
diff --git a/src/cms/models/feedback/feedback.py b/src/cms/models/feedback/feedback.py
--- a/src/cms/models/feedback/feedback.py
+++ b/src/cms/models/feedback/feedback.py
@@ -15,7 +15,8 @@
)
emotion = models.CharField(max_length=3, choices=EMOTION)
comment = models.CharField(max_length=1000)
- readStatus = models.BooleanField(default=False)
+ is_technical = models.BooleanField(default=False)
+ read_status = models.BooleanField(default=False)
created_date = models.DateTimeField(auto_now_add=True)
last_updated = models.DateTimeField(auto_now=True)
diff --git a/src/cms/models/feedback/technical_feedback.py b/src/cms/models/feedback/technical_feedback.py
deleted file mode 100644
--- a/src/cms/models/feedback/technical_feedback.py
+++ /dev/null
@@ -1,17 +0,0 @@
-"""
-Module for models storing feedback from front end users
-"""
-from django.db import models
-
-from .feedback import Feedback
-from ..pages.page import Page
-
-
-class TechnicalFeedback(Feedback):
- """
- Technical feedback on the end user app
- """
- page = models.ForeignKey(Page, on_delete=models.CASCADE)
-
- class Meta:
- default_permissions = ()
| {"golden_diff": "diff --git a/src/cms/models/__init__.py b/src/cms/models/__init__.py\n--- a/src/cms/models/__init__.py\n+++ b/src/cms/models/__init__.py\n@@ -13,7 +13,6 @@\n from .feedback.page_feedback import PageFeedback\n from .feedback.region_feedback import RegionFeedback\n from .feedback.search_result_feedback import SearchResultFeedback\n-from .feedback.technical_feedback import TechnicalFeedback\n \n from .languages.language import Language\n from .languages.language_tree_node import LanguageTreeNode\ndiff --git a/src/cms/models/feedback/feedback.py b/src/cms/models/feedback/feedback.py\n--- a/src/cms/models/feedback/feedback.py\n+++ b/src/cms/models/feedback/feedback.py\n@@ -15,7 +15,8 @@\n )\n emotion = models.CharField(max_length=3, choices=EMOTION)\n comment = models.CharField(max_length=1000)\n- readStatus = models.BooleanField(default=False)\n+ is_technical = models.BooleanField(default=False)\n+ read_status = models.BooleanField(default=False)\n \n created_date = models.DateTimeField(auto_now_add=True)\n last_updated = models.DateTimeField(auto_now=True)\ndiff --git a/src/cms/models/feedback/technical_feedback.py b/src/cms/models/feedback/technical_feedback.py\ndeleted file mode 100644\n--- a/src/cms/models/feedback/technical_feedback.py\n+++ /dev/null\n@@ -1,17 +0,0 @@\n-\"\"\"\n-Module for models storing feedback from front end users\n-\"\"\"\n-from django.db import models\n-\n-from .feedback import Feedback\n-from ..pages.page import Page\n-\n-\n-class TechnicalFeedback(Feedback):\n- \"\"\"\n- Technical feedback on the end user app\n- \"\"\"\n- page = models.ForeignKey(Page, on_delete=models.CASCADE)\n-\n- class Meta:\n- default_permissions = ()\n", "issue": "Refactor technical feedback\nThe current implementation of technical feedback does not comply with the way it is used in the apps when submitting feedback. Instead of having one model for technical feedback, we should rather have a `technical` flag for each of the other feedback models. This flag should be set whenever the `category`-parameter contains \"technisch\" or \"technical\". 
In a future API version, we can convert this to a parameter, which can either be true or false in the feedback submission request.\n", "before_files": [{"content": "from .config.configuration import Configuration\n\nfrom .events.event import Event\nfrom .events.event_translation import EventTranslation\nfrom .events.recurrence_rule import RecurrenceRule\n\nfrom .offers.offer import Offer\nfrom .offers.offer_template import OfferTemplate\n\nfrom .feedback.event_feedback import EventFeedback\nfrom .feedback.offer_feedback import OfferFeedback\nfrom .feedback.feedback import Feedback\nfrom .feedback.page_feedback import PageFeedback\nfrom .feedback.region_feedback import RegionFeedback\nfrom .feedback.search_result_feedback import SearchResultFeedback\nfrom .feedback.technical_feedback import TechnicalFeedback\n\nfrom .languages.language import Language\nfrom .languages.language_tree_node import LanguageTreeNode\n\nfrom .media.document import Document\n\nfrom .pages.page import Page\nfrom .pages.page_translation import PageTranslation\n\nfrom .pois.poi import POI\nfrom .pois.poi_translation import POITranslation\n\nfrom .push_notifications.push_notification import PushNotification\nfrom .push_notifications.push_notification_translation import PushNotificationTranslation\n\nfrom .regions.region import Region\n\nfrom .users.organization import Organization\nfrom .users.user_profile import UserProfile\nfrom .users.user_mfa import UserMfa\n", "path": "src/cms/models/__init__.py"}, {"content": "\"\"\"\nModule for models storing feedback from front end users\n\"\"\"\nfrom django.db import models\n\n\nclass Feedback(models.Model):\n \"\"\"\n Base class for collecting feeedback from users.\n \"\"\"\n EMOTION = (\n (\"Pos\", \"Positive\"),\n (\"Neg\", \"Negative\"),\n (\"NA\", \"Not Available\"),\n )\n emotion = models.CharField(max_length=3, choices=EMOTION)\n comment = models.CharField(max_length=1000)\n readStatus = models.BooleanField(default=False)\n\n created_date = models.DateTimeField(auto_now_add=True)\n last_updated = models.DateTimeField(auto_now=True)\n\n class Meta:\n default_permissions = ()\n permissions = (\n ('view_feedback', 'Can view feedback'),\n )\n", "path": "src/cms/models/feedback/feedback.py"}, {"content": "\"\"\"\nModule for models storing feedback from front end users\n\"\"\"\nfrom django.db import models\n\nfrom .feedback import Feedback\nfrom ..pages.page import Page\n\n\nclass TechnicalFeedback(Feedback):\n \"\"\"\n Technical feedback on the end user app\n \"\"\"\n page = models.ForeignKey(Page, on_delete=models.CASCADE)\n\n class Meta:\n default_permissions = ()\n", "path": "src/cms/models/feedback/technical_feedback.py"}]} | 1,294 | 394 |
gh_patches_debug_7917 | rasdani/github-patches | git_diff | bokeh__bokeh-6159 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
For Interactive Legends, a note about `muted_color` would be helpful
For the [Interactive Legend guide](https://github.com/bokeh/bokeh/blob/d8fcffa1c472bf641517ef81698bb6d057cbd30f/sphinx/source/docs/user_guide/interaction/legends.rst#id7), in addition to the note about `muted_alpha`, it would be helpful to also demonstrate `muted_color`.
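For example, the `p.line` call in `interaction_legend_mute.py` (shown below) could pass both, something like:

```python
# Sketch: keep the glyph's own color when muted, just at a lower alpha.
p.line(df['Date'], df['Close'], line_width=2, color=color, alpha=0.8,
       muted_color=color, muted_alpha=0.2, legend=name)
```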
</issue>
<code>
[start of sphinx/source/docs/user_guide/examples/interaction_legend_mute.py]
1 import pandas as pd
2
3 from bokeh.palettes import Spectral4
4 from bokeh.plotting import figure, output_file, show
5
6 p = figure(plot_width=800, plot_height=250, x_axis_type="datetime")
7 p.title.text = 'Click on legend entries to mute the corresponding lines'
8
9 for name, color in zip(['AAPL', 'IBM', 'MSFT', 'GOOG'], Spectral4):
10 df = pd.read_csv(
11 "http://ichart.yahoo.com/table.csv?s=%s&a=0&b=1&c=2005&d=0&e=1&f=2014" % name,
12 parse_dates=['Date']
13 )
14 p.line(df['Date'], df['Close'], line_width=2, color=color, alpha=0.8, muted_alpha=0.2, legend=name)
15
16 p.legend.location = "top_left"
17 p.legend.click_policy="mute"
18
19 output_file("interactive_legend.html", title="interactive_legend.py example")
20
21 show(p)
22
[end of sphinx/source/docs/user_guide/examples/interaction_legend_mute.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sphinx/source/docs/user_guide/examples/interaction_legend_mute.py b/sphinx/source/docs/user_guide/examples/interaction_legend_mute.py
--- a/sphinx/source/docs/user_guide/examples/interaction_legend_mute.py
+++ b/sphinx/source/docs/user_guide/examples/interaction_legend_mute.py
@@ -11,7 +11,8 @@
"http://ichart.yahoo.com/table.csv?s=%s&a=0&b=1&c=2005&d=0&e=1&f=2014" % name,
parse_dates=['Date']
)
- p.line(df['Date'], df['Close'], line_width=2, color=color, alpha=0.8, muted_alpha=0.2, legend=name)
+ p.line(df['Date'], df['Close'], line_width=2, color=color, alpha=0.8,
+ muted_color=color, muted_alpha=0.2, legend=name)
p.legend.location = "top_left"
p.legend.click_policy="mute"
| {"golden_diff": "diff --git a/sphinx/source/docs/user_guide/examples/interaction_legend_mute.py b/sphinx/source/docs/user_guide/examples/interaction_legend_mute.py\n--- a/sphinx/source/docs/user_guide/examples/interaction_legend_mute.py\n+++ b/sphinx/source/docs/user_guide/examples/interaction_legend_mute.py\n@@ -11,7 +11,8 @@\n \"http://ichart.yahoo.com/table.csv?s=%s&a=0&b=1&c=2005&d=0&e=1&f=2014\" % name,\n parse_dates=['Date']\n )\n- p.line(df['Date'], df['Close'], line_width=2, color=color, alpha=0.8, muted_alpha=0.2, legend=name)\n+ p.line(df['Date'], df['Close'], line_width=2, color=color, alpha=0.8,\n+ muted_color=color, muted_alpha=0.2, legend=name)\n \n p.legend.location = \"top_left\"\n p.legend.click_policy=\"mute\"\n", "issue": "For Interactive Legends, a note about `muted_color` would be helpful\nFor the [Interactive Legend guide](https://github.com/bokeh/bokeh/blob/d8fcffa1c472bf641517ef81698bb6d057cbd30f/sphinx/source/docs/user_guide/interaction/legends.rst#id7), in addition to the note about `muted_alpha`, it would be helpful to also demonstrate `muted_color`.\n", "before_files": [{"content": "import pandas as pd\n\nfrom bokeh.palettes import Spectral4\nfrom bokeh.plotting import figure, output_file, show\n\np = figure(plot_width=800, plot_height=250, x_axis_type=\"datetime\")\np.title.text = 'Click on legend entries to mute the corresponding lines'\n\nfor name, color in zip(['AAPL', 'IBM', 'MSFT', 'GOOG'], Spectral4):\n df = pd.read_csv(\n \"http://ichart.yahoo.com/table.csv?s=%s&a=0&b=1&c=2005&d=0&e=1&f=2014\" % name,\n parse_dates=['Date']\n )\n p.line(df['Date'], df['Close'], line_width=2, color=color, alpha=0.8, muted_alpha=0.2, legend=name)\n\np.legend.location = \"top_left\"\np.legend.click_policy=\"mute\"\n\noutput_file(\"interactive_legend.html\", title=\"interactive_legend.py example\")\n\nshow(p)\n", "path": "sphinx/source/docs/user_guide/examples/interaction_legend_mute.py"}]} | 916 | 223 |
gh_patches_debug_57344 | rasdani/github-patches | git_diff | ethereum__consensus-specs-1102 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
BLS and testing
Decided I wanted to get this out to explain the current state of testing, and **collect feedback** (implementers please comment) on what you need from testing, and your feelings about BLS usage in tests.
# BLS and testing
The two pain-points to get a pretty (and large) set of test-vectors out for clients are:
- BLS Signature creation
- BLS Signature verification
And a side-issue, but easily resolved:
*efficient creation of a genesis state*:
once BLS functionality is implemented in test code (creation of signed deposits, and verification), building a genesis state through the spec functions becomes expensive.
The solution would be to either cache it, or create it directly without going through the spec functions (the current temporary solution on the experiment branch).
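As a placeholder, the caching variant could be as simple as something like this (sketch only; `build_fn` stands in for whatever helper currently builds the genesis state):

```python
import copy

# Sketch: build the expensive genesis state once per config and hand out
# deep copies so individual tests cannot mutate the cached original.
_genesis_cache = {}


def cached_genesis_state(config_name, build_fn):
    if config_name not in _genesis_cache:
        _genesis_cache[config_name] = build_fn()
    return copy.deepcopy(_genesis_cache[config_name])
```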
## Status
Talking about the status on [`spectest-deco` PR 1052](https://github.com/ethereum/eth2.0-specs/pull/1052) here, based on the `v06x` branch, where we are developing 0.6 improvements. (to be merged back into dev later)
### The testing pipeline currently looks like:
- py-spec, calls BLS stub
- test-helpers, don't create self-signed objects with valid signatures
- py-test code, unified with test-vector-creation (see [PR 1052](https://github.com/ethereum/eth2.0-specs/pull/1052))
- py-test runner to run spec-tests, purely for assertions
- test-generator running the spec-tests, passing `generator_mode=true` to each of them, making them output a test-vector.
### Pytests status:
- move from `tests/` to `eth2spec/test`, i.e. part of package
- removed use of `pytest`
- annotated with `@spec_test` or similar (see PR 1052)
- as part of test-generation effort, yay for shared effort:
- expanded in block-operation testing: [coverage checklist here](https://github.com/ethereum/eth2.0-specs/issues/927)
- slightly faster, less deep-copies
- stuck on BLS stub (no sig creation/verification)
### Test-generation status:
- BLS, SSZ-generic, SSZ-static, shuffling test generators still all in place and up to date (`v06x` branch)
- `operations` test-gen uses test-package ability to output test-vectors for each test-case
- but no valid signatures
- lack of a definition how to handle this signature problem as a test-consumer
- there are no signature-related testcases
- turning BLS off would effectively let you check conformance, but it's hacky, and not remotely a good practice to have even an option for...
 - it's approx. ~140MB worth (iirc) of yaml-encoded state-transitions, covering many edge-cases. Worth getting into the hands of implementers quickly.
- `sanity` tests updated and can be cleanly used for test-generation, but require more work to define the format of the test-vectors, as there is more variety.
- `epoch` processing tests also updated, also can be used, not as complete as block-processing, lower priority.
## Possible ways forward:
- Simple but hacky: "turn BLS off for testing"
- No "BLS off": BLS is ON on the client side, but only partially on the spec side. Rely on signature verification not being hit before anything else during testing
- valid test cases generated with valid signatures
- invalid test cases marked: does it error because of BLS? And runners should check the reason for aborting processing: if it doesn't match, the test should fail. Now these pytests don't need full BLS update work, and can be released somewhat quicker
- "BLS on", more work (~1 week)
- slower on test-generation, but we get the best kind of test-vectors: correct, BLS verification ON.
 - blocker: what if a test case fails because of a signature error (test setup not creating the sig correctly), instead of a real assertion case. Spec will look correct, passes tests, but things are not right. We need to mark Sig-verification errors distinctly, so we can catch these problems when we turn BLS on in the pyspec. How: instead of `assert verify_...`, just `verify_...`, and make it raise a special `BLSVerificationError` (or something like that). A rough sketch of what I mean follows right after this list.
- We likely still want to mark tests as "signature related" or not, so implementers can catch it easily if their code is not aborting properly before signature verification, to assure invalid inputs are not costly.
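To make that failure mode explicit, the helper could look roughly like this (sketch only; names are placeholders, and `verify_fn` stands in for whatever BLS verification helper the spec exposes):

```python
# Sketch of the "raise instead of assert" idea for signature checks.
class BLSVerificationError(Exception):
    """Raised when a BLS signature check fails, so runners can tell signature
    problems apart from real state-transition assertion failures."""


def verify_or_raise(verify_fn, *args):
    # verify_fn is e.g. the spec's bls_verify; args are passed through as-is.
    if not verify_fn(*args):
        raise BLSVerificationError("BLS verification failed for %r" % (args,))
```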
A work-in-progress introduction of actual full BLS usage in the pytests is started here: [`tests-with-sigs` branch](https://github.com/ethereum/eth2.0-specs/tree/tests-with-sigs)
Suggestions welcome.
</issue>
<code>
[start of scripts/phase0/build_spec.py]
1 import sys
2 import function_puller
3
4
5 def build_phase0_spec(sourcefile, outfile):
6 code_lines = []
7 code_lines.append("""
8 from typing import (
9 Any,
10 Dict,
11 List,
12 NewType,
13 Tuple,
14 )
15 from eth2spec.utils.minimal_ssz import *
16 from eth2spec.utils.bls_stub import *
17
18 """)
19 for i in (1, 2, 3, 4, 8, 32, 48, 96):
20 code_lines.append("def int_to_bytes%d(x): return x.to_bytes(%d, 'little')" % (i, i))
21
22 code_lines.append("""
23
24 # stub, will get overwritten by real var
25 SLOTS_PER_EPOCH = 64
26
27
28 Slot = NewType('Slot', int) # uint64
29 Epoch = NewType('Epoch', int) # uint64
30 Shard = NewType('Shard', int) # uint64
31 ValidatorIndex = NewType('ValidatorIndex', int) # uint64
32 Gwei = NewType('Gwei', int) # uint64
33 Bytes32 = NewType('Bytes32', bytes) # bytes32
34 BLSPubkey = NewType('BLSPubkey', bytes) # bytes48
35 BLSSignature = NewType('BLSSignature', bytes) # bytes96
36 Store = None
37 """)
38
39 code_lines += function_puller.get_spec(sourcefile)
40
41 code_lines.append("""
42 # Monkey patch validator compute committee code
43 _compute_committee = compute_committee
44 committee_cache = {}
45
46
47 def compute_committee(indices: List[ValidatorIndex], seed: Bytes32, index: int, count: int) -> List[ValidatorIndex]:
48 param_hash = (hash_tree_root(indices), seed, index, count)
49
50 if param_hash in committee_cache:
51 return committee_cache[param_hash]
52 else:
53 ret = _compute_committee(indices, seed, index, count)
54 committee_cache[param_hash] = ret
55 return ret
56
57
58 # Monkey patch hash cache
59 _hash = hash
60 hash_cache = {}
61
62
63 def hash(x):
64 if x in hash_cache:
65 return hash_cache[x]
66 else:
67 ret = _hash(x)
68 hash_cache[x] = ret
69 return ret
70
71 # Access to overwrite spec constants based on configuration
72 def apply_constants_preset(preset: Dict[str, Any]):
73 global_vars = globals()
74 for k, v in preset.items():
75 global_vars[k] = v
76
77 # Deal with derived constants
78 global_vars['GENESIS_EPOCH'] = slot_to_epoch(GENESIS_SLOT)
79
80 # Initialize SSZ types again, to account for changed lengths
81 init_SSZ_types()
82 """)
83
84 with open(outfile, 'w') as out:
85 out.write("\n".join(code_lines))
86
87
88 if __name__ == '__main__':
89 if len(sys.argv) < 3:
90 print("Usage: <source phase0> <output phase0 pyspec>")
91 build_phase0_spec(sys.argv[1], sys.argv[2])
92
93
[end of scripts/phase0/build_spec.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scripts/phase0/build_spec.py b/scripts/phase0/build_spec.py
--- a/scripts/phase0/build_spec.py
+++ b/scripts/phase0/build_spec.py
@@ -13,7 +13,7 @@
Tuple,
)
from eth2spec.utils.minimal_ssz import *
-from eth2spec.utils.bls_stub import *
+from eth2spec.utils.bls import *
""")
for i in (1, 2, 3, 4, 8, 32, 48, 96):
| {"golden_diff": "diff --git a/scripts/phase0/build_spec.py b/scripts/phase0/build_spec.py\n--- a/scripts/phase0/build_spec.py\n+++ b/scripts/phase0/build_spec.py\n@@ -13,7 +13,7 @@\n Tuple,\n )\n from eth2spec.utils.minimal_ssz import *\n-from eth2spec.utils.bls_stub import *\n+from eth2spec.utils.bls import *\n \n \"\"\")\n for i in (1, 2, 3, 4, 8, 32, 48, 96):\n", "issue": "BLS and testing\nDecided I wanted to get this out to explain the current state of testing, and **collect feedback** (implementers please comment) on what you need from testing, and your feelings about BLS usage in tests.\r\n\r\n# BLS and testing\r\n\r\nThe two pain-points to get a pretty (and large) set of test-vectors out for clients are:\r\n- BLS Signature creation\r\n- BLS Signature verification\r\n\r\nAnd side-issue, but easily resolved:\r\n*efficient creation of a genesis state*:\r\nWhen BLS functionality is implemented in test-code (creation of signed deposits, and verification).\r\nSolution would be to either cache it, or create it directly, without going through the spec functions (current temporary solution on experiment branch).\r\n\r\n## Status\r\n\r\nTalking about the status on [`spectest-deco` PR 1052](https://github.com/ethereum/eth2.0-specs/pull/1052) here, based on the `v06x` branch, where we are developing 0.6 improvements. (to be merged back into dev later)\r\n\r\n### The testing pipeline currently looks like:\r\n\r\n- py-spec, calls BLS stub\r\n- test-helpers, don't create self-signed objects with valid signatures\r\n- py-test code, unified with test-vector-creation (see [PR 1052](https://github.com/ethereum/eth2.0-specs/pull/1052))\r\n- py-test runner to run spec-tests, purely for assertions\r\n- test-generator running the spec-tests, passing `generator_mode=true` to each of them, making them output a test-vector.\r\n\r\n### Pytests status:\r\n\r\n- move from `tests/` to `eth2spec/test`, i.e. part of package\r\n - removed use of `pytest`\r\n - annotated with `@spec_test` or similar (see PR 1052)\r\n- as part of test-generation effort, yay for shared effort:\r\n - expanded in block-operation testing: [coverage checklist here](https://github.com/ethereum/eth2.0-specs/issues/927)\r\n - slightly faster, less deep-copies\r\n- stuck on BLS stub (no sig creation/verification)\r\n\r\n### Test-generation status:\r\n\r\n- BLS, SSZ-generic, SSZ-static, shuffling test generators still all in place and up to date (`v06x` branch)\r\n- `operations` test-gen uses test-package ability to output test-vectors for each test-case\r\n - but no valid signatures\r\n - lack of a definition how to handle this signature problem as a test-consumer\r\n - there are no signature-related testcases\r\n - turning BLS off would effectively let you check conformance, but it's hacky, and not remotely a good practice to have even an option for...\r\n - it's approx. ~140MB worth (iirc) of yaml encoded state-transitions, covering many edge-cases. Worth to get in the hands of implementers quick.\r\n- `sanity` tests updated and can be cleanly used for test-generation, but requires more work to define the format of the test-vectors, as they is more variety.\r\n- `epoch` processing tests also updated, also can be used, not as complete as block-processing, lower priority.\r\n\r\n## Possible ways forward:\r\n\r\n- Simple but hacky: \"turn BLS off for testing\"\r\n- No \"BLS off\", BLS ON on client side, but only partially on spec side. 
Rely on signature verification not being hit before anything else during testing\r\n - valid test cases generated with valid signatures\r\n - invalid test cases marked: does it error because of BLS? And runners should check the reason for aborting processing: if it doesn't match, the test should fail. Now these pytests don't need full BLS update work, and can be released somewhat quicker\r\n- \"BLS on\", more work (~1 week)\r\n - slower on test-generation, but we get the best kind of test-vectors: correct, BLS verification ON.\r\n - blocker: what if a test case fails because of a signature error (test setup not creating the sig correctly), instead of a real assertion case. Spec will look correct, passes tests, but things are not right. We need to mark Sig-verification errors distinctly, so we can catch these problems when we turn BLS on in the pyspec. How: instead of `assert verify_...`, just `verify_...`, and make it raise a special `BLSVerificationError` (or something like that)\r\n - We likely still want to mark tests as \"signature related\" or not, so implementers can catch it easily if their code is not aborting properly before signature verification, to assure invalid inputs are not costly.\r\n\r\nA work-in-progress introduction of actual full BLS usage in the pytests is started here: [`tests-with-sigs` branch](https://github.com/ethereum/eth2.0-specs/tree/tests-with-sigs)\r\n\r\nSuggestions welcome.\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "import sys\nimport function_puller\n\n\ndef build_phase0_spec(sourcefile, outfile):\n code_lines = []\n code_lines.append(\"\"\"\nfrom typing import (\n Any,\n Dict,\n List,\n NewType,\n Tuple,\n)\nfrom eth2spec.utils.minimal_ssz import *\nfrom eth2spec.utils.bls_stub import *\n\n\"\"\")\n for i in (1, 2, 3, 4, 8, 32, 48, 96):\n code_lines.append(\"def int_to_bytes%d(x): return x.to_bytes(%d, 'little')\" % (i, i))\n\n code_lines.append(\"\"\"\n\n# stub, will get overwritten by real var\nSLOTS_PER_EPOCH = 64\n\n\nSlot = NewType('Slot', int) # uint64\nEpoch = NewType('Epoch', int) # uint64\nShard = NewType('Shard', int) # uint64\nValidatorIndex = NewType('ValidatorIndex', int) # uint64\nGwei = NewType('Gwei', int) # uint64\nBytes32 = NewType('Bytes32', bytes) # bytes32\nBLSPubkey = NewType('BLSPubkey', bytes) # bytes48\nBLSSignature = NewType('BLSSignature', bytes) # bytes96\nStore = None\n\"\"\")\n\n code_lines += function_puller.get_spec(sourcefile)\n\n code_lines.append(\"\"\"\n# Monkey patch validator compute committee code\n_compute_committee = compute_committee\ncommittee_cache = {}\n\n\ndef compute_committee(indices: List[ValidatorIndex], seed: Bytes32, index: int, count: int) -> List[ValidatorIndex]:\n param_hash = (hash_tree_root(indices), seed, index, count)\n\n if param_hash in committee_cache:\n return committee_cache[param_hash]\n else:\n ret = _compute_committee(indices, seed, index, count)\n committee_cache[param_hash] = ret\n return ret\n\n\n# Monkey patch hash cache\n_hash = hash\nhash_cache = {}\n\n\ndef hash(x):\n if x in hash_cache:\n return hash_cache[x]\n else:\n ret = _hash(x)\n hash_cache[x] = ret\n return ret\n\n# Access to overwrite spec constants based on configuration\ndef apply_constants_preset(preset: Dict[str, Any]):\n global_vars = globals()\n for k, v in preset.items():\n global_vars[k] = v\n\n # Deal with derived constants\n global_vars['GENESIS_EPOCH'] = slot_to_epoch(GENESIS_SLOT)\n\n # Initialize SSZ types again, to account for changed lengths\n init_SSZ_types()\n\"\"\")\n\n with open(outfile, 'w') 
as out:\n out.write(\"\\n\".join(code_lines))\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 3:\n print(\"Usage: <source phase0> <output phase0 pyspec>\")\n build_phase0_spec(sys.argv[1], sys.argv[2])\n\n", "path": "scripts/phase0/build_spec.py"}]} | 2,439 | 121 |
gh_patches_debug_23497 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-6234 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use `modified` time instead of `created` time for recording search queries
We should use `modified` time here and not the `created` time.
`modified` time gets updated automatically, so the query should also be changed.
https://github.com/readthedocs/readthedocs.org/blob/98a7ff1758d829323e5ef9949f57401f7103ec4e/readthedocs/search/tasks.py#L213-L214
https://github.com/readthedocs/readthedocs.org/blob/98a7ff1758d829323e5ef9949f57401f7103ec4e/readthedocs/search/tasks.py#L159-L163
Ref: [#6088 (comment)](https://github.com/readthedocs/readthedocs.org/pull/6088#discussion_r318738599)
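Concretely, the partial-query lookup quoted above would be keyed on `modified` instead, roughly (sketch of the changed queryset only; surrounding code as in `tasks.py` below):

```python
# Sketch: filter/order on modified, which is bumped automatically on save.
partial_query_qs = SearchQuery.objects.filter(
    project__slug=project_slug,
    version__slug=version_slug,
    modified__gte=before_10_sec,
).order_by('-modified')
```

With that in place, the manual `obj.created = time` and `obj.save()` bump after `create()` should no longer be needed either.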
</issue>
<code>
[start of readthedocs/search/tasks.py]
1 import logging
2
3 from dateutil.parser import parse
4 from django.apps import apps
5 from django.utils import timezone
6 from django_elasticsearch_dsl.registries import registry
7
8 from readthedocs.builds.models import Version
9 from readthedocs.projects.models import Project
10 from readthedocs.search.models import SearchQuery
11 from readthedocs.worker import app
12 from .utils import _get_index, _get_document
13
14 log = logging.getLogger(__name__)
15
16
17 @app.task(queue='web')
18 def index_objects_to_es(
19 app_label, model_name, document_class, index_name=None, chunk=None, objects_id=None
20 ):
21
22 if chunk and objects_id:
23 raise ValueError('You can not pass both chunk and objects_id.')
24
25 if not (chunk or objects_id):
26 raise ValueError('You must pass a chunk or objects_id.')
27
28 model = apps.get_model(app_label, model_name)
29 document = _get_document(model=model, document_class=document_class)
30 doc_obj = document()
31
32 # WARNING: This must use the exact same queryset as from where we get the ID's
33 # There is a chance there is a race condition here as the ID's may change as the task runs,
34 # so we need to think through this a bit more and probably pass explicit ID's,
35 # but there are performance issues with that on large model sets
36 queryset = doc_obj.get_queryset()
37 if chunk:
38 # Chunk is a tuple with start and end index of queryset
39 start = chunk[0]
40 end = chunk[1]
41 queryset = queryset[start:end]
42 elif objects_id:
43 queryset = queryset.filter(id__in=objects_id)
44
45 if index_name:
46 # Hack the index name temporarily for reindexing tasks
47 old_index_name = document._doc_type.index
48 document._doc_type.index = index_name
49 log.info('Replacing index name %s with %s', old_index_name, index_name)
50
51 log.info("Indexing model: %s, '%s' objects", model.__name__, queryset.count())
52 doc_obj.update(queryset.iterator())
53
54 if index_name:
55 log.info('Undoing index replacement, settings %s with %s',
56 document._doc_type.index, old_index_name)
57 document._doc_type.index = old_index_name
58
59
60 @app.task(queue='web')
61 def delete_objects_in_es(app_label, model_name, document_class, objects_id):
62 model = apps.get_model(app_label, model_name)
63 document = _get_document(model=model, document_class=document_class)
64 doc_obj = document()
65 queryset = doc_obj.get_queryset()
66 queryset = queryset.filter(id__in=objects_id)
67 log.info("Deleting model: %s, '%s' objects", model.__name__, queryset.count())
68 try:
69 # This is a common case that we should be handling a better way
70 doc_obj.update(queryset.iterator(), action='delete')
71 except Exception:
72 log.warning('Unable to delete a subset of files. Continuing.', exc_info=True)
73
74
75 @app.task(queue='web')
76 def create_new_es_index(app_label, model_name, index_name, new_index_name):
77 model = apps.get_model(app_label, model_name)
78 indices = registry.get_indices(models=[model])
79 old_index = _get_index(indices=indices, index_name=index_name)
80 new_index = old_index.clone(name=new_index_name)
81 new_index.create()
82
83
84 @app.task(queue='web')
85 def switch_es_index(app_label, model_name, index_name, new_index_name):
86 model = apps.get_model(app_label, model_name)
87 indices = registry.get_indices(models=[model])
88 old_index = _get_index(indices=indices, index_name=index_name)
89 new_index = old_index.clone(name=new_index_name)
90 old_index_actual_name = None
91
92 if old_index.exists():
93 # Alias can not be used to delete an index.
94 # https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-delete-index.html
95 # So get the index actual name to delete it
96 old_index_info = old_index.get()
97 # The info is a dictionary and the key is the actual name of the index
98 old_index_actual_name = list(old_index_info.keys())[0]
99
100 # Put alias into the new index name and delete the old index if its exist
101 new_index.put_alias(name=index_name)
102 if old_index_actual_name:
103 old_index.connection.indices.delete(index=old_index_actual_name)
104
105
106 @app.task(queue='web')
107 def index_missing_objects(app_label, model_name, document_class, index_generation_time):
108 """
109 Task to insure that none of the object is missed from indexing.
110
111 The object ids are sent to `index_objects_to_es` task for indexing.
112 While the task is running, new objects can be created/deleted in database
113 and they will not be in the tasks for indexing into ES.
114 This task will index all the objects that got into DB after the `latest_indexed` timestamp
115 to ensure that everything is in ES index.
116 """
117 model = apps.get_model(app_label, model_name)
118 document = _get_document(model=model, document_class=document_class)
119 query_string = '{}__lte'.format(document.modified_model_field)
120 queryset = document().get_queryset().exclude(**{query_string: index_generation_time})
121 document().update(queryset.iterator())
122
123 log.info("Indexed %s missing objects from model: %s'", queryset.count(), model.__name__)
124
125 # TODO: Figure out how to remove the objects from ES index that has been deleted
126
127
128 @app.task(queue='web')
129 def delete_old_search_queries_from_db():
130 """
131 Delete old SearchQuery objects.
132
133 This is run by celery beat every day.
134 """
135 last_3_months = timezone.now().date() - timezone.timedelta(days=90)
136 search_queries_qs = SearchQuery.objects.filter(
137 created__date__lte=last_3_months,
138 )
139
140 if search_queries_qs.exists():
141 log.info('Deleting search queries for last 3 months. Total: %s', search_queries_qs.count())
142 search_queries_qs.delete()
143
144
145 @app.task(queue='web')
146 def record_search_query(project_slug, version_slug, query, total_results, time_string):
147 """Record/update search query in database."""
148 if not project_slug or not version_slug or not query:
149 log.debug(
150 'Not recording the search query. Passed arguments: '
151 'project_slug: %s, version_slug: %s, query: %s, total_results: %s, time: %s' % (
152 project_slug, version_slug, query, total_results, time_string
153 )
154 )
155 return
156
157 time = parse(time_string)
158 before_10_sec = time - timezone.timedelta(seconds=10)
159 partial_query_qs = SearchQuery.objects.filter(
160 project__slug=project_slug,
161 version__slug=version_slug,
162 created__gte=before_10_sec,
163 ).order_by('-created')
164
165 # check if partial query exists,
166 # if yes, then just update the object.
167 for partial_query in partial_query_qs.iterator():
168 if query.startswith(partial_query.query):
169 partial_query.created = time
170 partial_query.query = query
171 partial_query.save()
172 return
173
174 # don't record query with zero results.
175 if not total_results:
176 log.debug(
177 'Not recording search query because of zero results. Passed arguments: '
178 'project_slug: %s, version_slug: %s, query: %s, total_results: %s, time: %s' % (
179 project_slug, version_slug, query, total_results, time
180 )
181 )
182 return
183
184 project = Project.objects.filter(slug=project_slug).first()
185 if not project:
186 log.debug(
187 'Not recording the search query because project does not exist. '
188 'project_slug: %s' % (
189 project_slug
190 )
191 )
192 return
193
194 version_qs = Version.objects.filter(project=project, slug=version_slug)
195
196 if not version_qs.exists():
197 log.debug(
198 'Not recording the search query because version does not exist. '
199 'project_slug: %s, version_slug: %s' % (
200 project_slug, version_slug
201 )
202 )
203 return
204
205 version = version_qs.first()
206
207 # make a new SearchQuery object.
208 obj = SearchQuery.objects.create(
209 project=project,
210 version=version,
211 query=query,
212 )
213 obj.created = time
214 obj.save()
215
[end of readthedocs/search/tasks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/readthedocs/search/tasks.py b/readthedocs/search/tasks.py
--- a/readthedocs/search/tasks.py
+++ b/readthedocs/search/tasks.py
@@ -159,14 +159,13 @@
partial_query_qs = SearchQuery.objects.filter(
project__slug=project_slug,
version__slug=version_slug,
- created__gte=before_10_sec,
- ).order_by('-created')
+ modified__gte=before_10_sec,
+ ).order_by('-modified')
# check if partial query exists,
# if yes, then just update the object.
for partial_query in partial_query_qs.iterator():
if query.startswith(partial_query.query):
- partial_query.created = time
partial_query.query = query
partial_query.save()
return
@@ -205,10 +204,8 @@
version = version_qs.first()
# make a new SearchQuery object.
- obj = SearchQuery.objects.create(
+ SearchQuery.objects.create(
project=project,
version=version,
query=query,
)
- obj.created = time
- obj.save()
| {"golden_diff": "diff --git a/readthedocs/search/tasks.py b/readthedocs/search/tasks.py\n--- a/readthedocs/search/tasks.py\n+++ b/readthedocs/search/tasks.py\n@@ -159,14 +159,13 @@\n partial_query_qs = SearchQuery.objects.filter(\n project__slug=project_slug,\n version__slug=version_slug,\n- created__gte=before_10_sec,\n- ).order_by('-created')\n+ modified__gte=before_10_sec,\n+ ).order_by('-modified')\n \n # check if partial query exists,\n # if yes, then just update the object.\n for partial_query in partial_query_qs.iterator():\n if query.startswith(partial_query.query):\n- partial_query.created = time\n partial_query.query = query\n partial_query.save()\n return\n@@ -205,10 +204,8 @@\n version = version_qs.first()\n \n # make a new SearchQuery object.\n- obj = SearchQuery.objects.create(\n+ SearchQuery.objects.create(\n project=project,\n version=version,\n query=query,\n )\n- obj.created = time\n- obj.save()\n", "issue": "Use `modified` time instead of `created` time for recording search queries\nWe should use `modified` time here and not the `created` time.\r\n`modified` time gets updated automatically, so the query should also be changed.\r\n\r\nhttps://github.com/readthedocs/readthedocs.org/blob/98a7ff1758d829323e5ef9949f57401f7103ec4e/readthedocs/search/tasks.py#L213-L214\r\n\r\nhttps://github.com/readthedocs/readthedocs.org/blob/98a7ff1758d829323e5ef9949f57401f7103ec4e/readthedocs/search/tasks.py#L159-L163\r\n\r\nRef: [#6088 (comment)](https://github.com/readthedocs/readthedocs.org/pull/6088#discussion_r318738599)\n", "before_files": [{"content": "import logging\n\nfrom dateutil.parser import parse\nfrom django.apps import apps\nfrom django.utils import timezone\nfrom django_elasticsearch_dsl.registries import registry\n\nfrom readthedocs.builds.models import Version\nfrom readthedocs.projects.models import Project\nfrom readthedocs.search.models import SearchQuery\nfrom readthedocs.worker import app\nfrom .utils import _get_index, _get_document\n\nlog = logging.getLogger(__name__)\n\n\[email protected](queue='web')\ndef index_objects_to_es(\n app_label, model_name, document_class, index_name=None, chunk=None, objects_id=None\n):\n\n if chunk and objects_id:\n raise ValueError('You can not pass both chunk and objects_id.')\n\n if not (chunk or objects_id):\n raise ValueError('You must pass a chunk or objects_id.')\n\n model = apps.get_model(app_label, model_name)\n document = _get_document(model=model, document_class=document_class)\n doc_obj = document()\n\n # WARNING: This must use the exact same queryset as from where we get the ID's\n # There is a chance there is a race condition here as the ID's may change as the task runs,\n # so we need to think through this a bit more and probably pass explicit ID's,\n # but there are performance issues with that on large model sets\n queryset = doc_obj.get_queryset()\n if chunk:\n # Chunk is a tuple with start and end index of queryset\n start = chunk[0]\n end = chunk[1]\n queryset = queryset[start:end]\n elif objects_id:\n queryset = queryset.filter(id__in=objects_id)\n\n if index_name:\n # Hack the index name temporarily for reindexing tasks\n old_index_name = document._doc_type.index\n document._doc_type.index = index_name\n log.info('Replacing index name %s with %s', old_index_name, index_name)\n\n log.info(\"Indexing model: %s, '%s' objects\", model.__name__, queryset.count())\n doc_obj.update(queryset.iterator())\n\n if index_name:\n log.info('Undoing index replacement, settings %s with %s',\n document._doc_type.index, 
old_index_name)\n document._doc_type.index = old_index_name\n\n\[email protected](queue='web')\ndef delete_objects_in_es(app_label, model_name, document_class, objects_id):\n model = apps.get_model(app_label, model_name)\n document = _get_document(model=model, document_class=document_class)\n doc_obj = document()\n queryset = doc_obj.get_queryset()\n queryset = queryset.filter(id__in=objects_id)\n log.info(\"Deleting model: %s, '%s' objects\", model.__name__, queryset.count())\n try:\n # This is a common case that we should be handling a better way\n doc_obj.update(queryset.iterator(), action='delete')\n except Exception:\n log.warning('Unable to delete a subset of files. Continuing.', exc_info=True)\n\n\[email protected](queue='web')\ndef create_new_es_index(app_label, model_name, index_name, new_index_name):\n model = apps.get_model(app_label, model_name)\n indices = registry.get_indices(models=[model])\n old_index = _get_index(indices=indices, index_name=index_name)\n new_index = old_index.clone(name=new_index_name)\n new_index.create()\n\n\[email protected](queue='web')\ndef switch_es_index(app_label, model_name, index_name, new_index_name):\n model = apps.get_model(app_label, model_name)\n indices = registry.get_indices(models=[model])\n old_index = _get_index(indices=indices, index_name=index_name)\n new_index = old_index.clone(name=new_index_name)\n old_index_actual_name = None\n\n if old_index.exists():\n # Alias can not be used to delete an index.\n # https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-delete-index.html\n # So get the index actual name to delete it\n old_index_info = old_index.get()\n # The info is a dictionary and the key is the actual name of the index\n old_index_actual_name = list(old_index_info.keys())[0]\n\n # Put alias into the new index name and delete the old index if its exist\n new_index.put_alias(name=index_name)\n if old_index_actual_name:\n old_index.connection.indices.delete(index=old_index_actual_name)\n\n\[email protected](queue='web')\ndef index_missing_objects(app_label, model_name, document_class, index_generation_time):\n \"\"\"\n Task to insure that none of the object is missed from indexing.\n\n The object ids are sent to `index_objects_to_es` task for indexing.\n While the task is running, new objects can be created/deleted in database\n and they will not be in the tasks for indexing into ES.\n This task will index all the objects that got into DB after the `latest_indexed` timestamp\n to ensure that everything is in ES index.\n \"\"\"\n model = apps.get_model(app_label, model_name)\n document = _get_document(model=model, document_class=document_class)\n query_string = '{}__lte'.format(document.modified_model_field)\n queryset = document().get_queryset().exclude(**{query_string: index_generation_time})\n document().update(queryset.iterator())\n\n log.info(\"Indexed %s missing objects from model: %s'\", queryset.count(), model.__name__)\n\n # TODO: Figure out how to remove the objects from ES index that has been deleted\n\n\[email protected](queue='web')\ndef delete_old_search_queries_from_db():\n \"\"\"\n Delete old SearchQuery objects.\n\n This is run by celery beat every day.\n \"\"\"\n last_3_months = timezone.now().date() - timezone.timedelta(days=90)\n search_queries_qs = SearchQuery.objects.filter(\n created__date__lte=last_3_months,\n )\n\n if search_queries_qs.exists():\n log.info('Deleting search queries for last 3 months. 
Total: %s', search_queries_qs.count())\n search_queries_qs.delete()\n\n\[email protected](queue='web')\ndef record_search_query(project_slug, version_slug, query, total_results, time_string):\n \"\"\"Record/update search query in database.\"\"\"\n if not project_slug or not version_slug or not query:\n log.debug(\n 'Not recording the search query. Passed arguments: '\n 'project_slug: %s, version_slug: %s, query: %s, total_results: %s, time: %s' % (\n project_slug, version_slug, query, total_results, time_string\n )\n )\n return\n\n time = parse(time_string)\n before_10_sec = time - timezone.timedelta(seconds=10)\n partial_query_qs = SearchQuery.objects.filter(\n project__slug=project_slug,\n version__slug=version_slug,\n created__gte=before_10_sec,\n ).order_by('-created')\n\n # check if partial query exists,\n # if yes, then just update the object.\n for partial_query in partial_query_qs.iterator():\n if query.startswith(partial_query.query):\n partial_query.created = time\n partial_query.query = query\n partial_query.save()\n return\n\n # don't record query with zero results.\n if not total_results:\n log.debug(\n 'Not recording search query because of zero results. Passed arguments: '\n 'project_slug: %s, version_slug: %s, query: %s, total_results: %s, time: %s' % (\n project_slug, version_slug, query, total_results, time\n )\n )\n return\n\n project = Project.objects.filter(slug=project_slug).first()\n if not project:\n log.debug(\n 'Not recording the search query because project does not exist. '\n 'project_slug: %s' % (\n project_slug\n )\n )\n return\n\n version_qs = Version.objects.filter(project=project, slug=version_slug)\n\n if not version_qs.exists():\n log.debug(\n 'Not recording the search query because version does not exist. '\n 'project_slug: %s, version_slug: %s' % (\n project_slug, version_slug\n )\n )\n return\n\n version = version_qs.first()\n\n # make a new SearchQuery object.\n obj = SearchQuery.objects.create(\n project=project,\n version=version,\n query=query,\n )\n obj.created = time\n obj.save()\n", "path": "readthedocs/search/tasks.py"}]} | 3,116 | 256 |
gh_patches_debug_13930 | rasdani/github-patches | git_diff | microsoft__ptvsd-1425 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
python without debugging won't start if there is a breakpoint
@tmdag commented on [Thu May 02 2019](https://github.com/microsoft/vscode-python/issues/5540)
## Environment data
- VSCode Version: 1.33.1
- OS Version:Linux 5.0.7-200.fc29.x86_64
- Extension version (available under the Extensions sidebar): Python 2019.4.11987
- Anaconda Extension Pack 1.0.1
## Steps to reproduce:
Open Python code
create a breakpoint
run python without debugging (ctrl+F5)
Terminal output:
cd /home/user/dev/Pytool ; env PYTHONIOENCODING=UTF-8 PYTHONUNBUFFERED=1 /usr/bin/python3 /home/user/.vscode/extensions/ms-python.python-2019.4.11987/pythonFiles/ptvsd_launcher.py --default --nodebug --client --host localhost --port 36019 /home/user/dev/Pytool/mypytool.py
Terminated
Does this issue occur when all extensions are disabled?: Yes/No
Not sure - Python extension is required
## Enabled Extensions:
Bookmarks 10.4.3
C/C++ 0.221
Gist 3.0.3
Git History 0.4.6
GitLens - 9.6.3
markdownlint 0.26.0
Syncing 2.1.6
OpenCL 0.5.2
VEX 0.4.0
TAML 0.4.0
---
@jxramos commented on [Thu May 02 2019](https://github.com/microsoft/vscode-python/issues/5540#issuecomment-488807421)
I'm seeing this too on macOS; it immediately exits with `Terminated: 15`. This behavior persists even if the breakpoint is disabled/unchecked.
</issue>
<code>
[start of src/ptvsd/runner.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License. See LICENSE in the project root
3 # for license information.
4
5 import pydevd
6 import threading
7
8 from ptvsd.daemon import DaemonBase
9 from ptvsd.session import DebugSession
10 from ptvsd.wrapper import VSCLifecycleMsgProcessor
11 from pydevd import init_stdout_redirect, init_stderr_redirect
12
13
14 HOSTNAME = 'localhost'
15
16
17 def run(address, filename, is_module, *args, **kwargs):
18 # TODO: docstring
19 # TODO: client/server -> address
20 daemon = Daemon()
21 if not daemon.wait_for_launch(address):
22 return
23
24 debugger = pydevd.PyDB()
25 # We do not want some internal methods to get executed in non-debug mode.
26 debugger.init_matplotlib_support = lambda *arg: None
27 debugger.run(
28 file=filename,
29 globals=None,
30 locals=None,
31 is_module=is_module,
32 set_trace=False)
33
34
35 class Daemon(DaemonBase):
36 """The process-level manager for the VSC protocol debug adapter."""
37
38 LAUNCH_TIMEOUT = 10000 # seconds
39
40 class SESSION(DebugSession):
41 class MESSAGE_PROCESSOR(VSCLifecycleMsgProcessor):
42 def on_invalid_request(self, request, args):
43 self.send_response(request, success=True)
44
45 def wait_for_launch(self, addr, timeout=LAUNCH_TIMEOUT):
46 # TODO: docstring
47 launched = threading.Event()
48 _, start_session = self.start_client(addr)
49 start_session(
50 notify_launch=launched.set,
51 )
52 return launched.wait(timeout)
53
54 def _start(self):
55 import weakref
56 weak_self = weakref.ref(self) # Avoid cyclic ref
57
58 def on_stdout(msg):
59 self = weak_self()
60 if self is not None:
61 self._send_output('stdout', msg)
62
63 def on_stderr(msg):
64 self = weak_self()
65 if self is not None:
66 self._send_output('stderr', msg)
67
68 init_stdout_redirect(on_stdout)
69 init_stderr_redirect(on_stderr)
70 return NoSocket()
71
72 def _close(self):
73 super(Daemon, self)._close()
74
75 def _send_output(self, category, output):
76 if self.session is None:
77 return
78 self.session._msgprocessor.send_event('output',
79 category=category,
80 output=output)
81
82
83 class NoSocket(object):
84 """A object with a noop socket lifecycle."""
85
86 def shutdown(self, *args, **kwargs):
87 pass
88
89 def close(self):
90 pass
91
[end of src/ptvsd/runner.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/ptvsd/runner.py b/src/ptvsd/runner.py
--- a/src/ptvsd/runner.py
+++ b/src/ptvsd/runner.py
@@ -39,6 +39,19 @@
class SESSION(DebugSession):
class MESSAGE_PROCESSOR(VSCLifecycleMsgProcessor):
+
+ def on_setBreakpoints(self, request, args):
+ # Note: breakpoints is required (vscode will terminate
+ # the debugger if that's not the case).
+ # See: https://github.com/microsoft/ptvsd/issues/1408
+ self.send_response(
+ request,
+ success=True,
+ breakpoints=(
+ [{'verified': False}] * len(args.get('breakpoints', ()))
+ )
+ )
+
def on_invalid_request(self, request, args):
self.send_response(request, success=True)
| {"golden_diff": "diff --git a/src/ptvsd/runner.py b/src/ptvsd/runner.py\n--- a/src/ptvsd/runner.py\n+++ b/src/ptvsd/runner.py\n@@ -39,6 +39,19 @@\n \n class SESSION(DebugSession):\n class MESSAGE_PROCESSOR(VSCLifecycleMsgProcessor):\n+\n+ def on_setBreakpoints(self, request, args):\n+ # Note: breakpoints is required (vscode will terminate\n+ # the debugger if that's not the case).\n+ # See: https://github.com/microsoft/ptvsd/issues/1408\n+ self.send_response(\n+ request,\n+ success=True,\n+ breakpoints=(\n+ [{'verified': False}] * len(args.get('breakpoints', ()))\n+ )\n+ )\n+\n def on_invalid_request(self, request, args):\n self.send_response(request, success=True)\n", "issue": "python without debugging won't start if there is a breakpoint\n@tmdag commented on [Thu May 02 2019](https://github.com/microsoft/vscode-python/issues/5540)\n\n## Environment data\r\n- VSCode Version: 1.33.1\r\n- OS Version:Linux 5.0.7-200.fc29.x86_64\r\n- Extension version (available under the Extensions sidebar): Python 2019.4.11987\r\n- Anaconda Extension Pack 1.0.1\r\n\r\n## Steps to reproduce:\r\nOpen Python code\r\ncreate a breakpoint\r\nrun python without debugging (ctrl+F5)\r\nTerminal output:\r\n\r\ncd /home/user/dev/Pytool ; env PYTHONIOENCODING=UTF-8 PYTHONUNBUFFERED=1 /usr/bin/python3 /home/user/.vscode/extensions/ms-python.python-2019.4.11987/pythonFiles/ptvsd_launcher.py --default --nodebug --client --host localhost --port 36019 /home/user/dev/Pytool/mypytool.py\r\nTerminated\r\nDoes this issue occur when all extensions are disabled?: Yes/No\r\nNot sure - Python extension is required\r\n\r\n## Enabled Extensions:\r\nBookmarks 10.4.3\r\nC/C++ 0.221\r\nGist 3.0.3\r\nGit History 0.4.6\r\nGitLens - 9.6.3\r\nmarkdownlint 0.26.0\r\n\r\nSyncing 2.1.6\r\nOpenCL 0.5.2\r\nVEX 0.4.0\r\nTAML 0.4.0\r\n\r\n\n\n---\n\n@jxramos commented on [Thu May 02 2019](https://github.com/microsoft/vscode-python/issues/5540#issuecomment-488807421)\n\nI'm seeing this too on a MacOS, it immediately exits with `Terminated: 15`. This behavior persists even if the breakpoint is disabled/unchecked.\n\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See LICENSE in the project root\n# for license information.\n\nimport pydevd\nimport threading\n\nfrom ptvsd.daemon import DaemonBase\nfrom ptvsd.session import DebugSession\nfrom ptvsd.wrapper import VSCLifecycleMsgProcessor\nfrom pydevd import init_stdout_redirect, init_stderr_redirect\n\n\nHOSTNAME = 'localhost'\n\n\ndef run(address, filename, is_module, *args, **kwargs):\n # TODO: docstring\n # TODO: client/server -> address\n daemon = Daemon()\n if not daemon.wait_for_launch(address):\n return\n\n debugger = pydevd.PyDB()\n # We do not want some internal methods to get executed in non-debug mode.\n debugger.init_matplotlib_support = lambda *arg: None\n debugger.run(\n file=filename,\n globals=None,\n locals=None,\n is_module=is_module,\n set_trace=False)\n\n\nclass Daemon(DaemonBase):\n \"\"\"The process-level manager for the VSC protocol debug adapter.\"\"\"\n\n LAUNCH_TIMEOUT = 10000 # seconds\n\n class SESSION(DebugSession):\n class MESSAGE_PROCESSOR(VSCLifecycleMsgProcessor):\n def on_invalid_request(self, request, args):\n self.send_response(request, success=True)\n\n def wait_for_launch(self, addr, timeout=LAUNCH_TIMEOUT):\n # TODO: docstring\n launched = threading.Event()\n _, start_session = self.start_client(addr)\n start_session(\n notify_launch=launched.set,\n )\n return launched.wait(timeout)\n\n def _start(self):\n import weakref\n weak_self = weakref.ref(self) # Avoid cyclic ref\n\n def on_stdout(msg):\n self = weak_self()\n if self is not None:\n self._send_output('stdout', msg)\n\n def on_stderr(msg):\n self = weak_self()\n if self is not None:\n self._send_output('stderr', msg)\n\n init_stdout_redirect(on_stdout)\n init_stderr_redirect(on_stderr)\n return NoSocket()\n\n def _close(self):\n super(Daemon, self)._close()\n\n def _send_output(self, category, output):\n if self.session is None:\n return\n self.session._msgprocessor.send_event('output',\n category=category,\n output=output)\n\n\nclass NoSocket(object):\n \"\"\"A object with a noop socket lifecycle.\"\"\"\n\n def shutdown(self, *args, **kwargs):\n pass\n\n def close(self):\n pass\n", "path": "src/ptvsd/runner.py"}]} | 1,719 | 201 |
gh_patches_debug_41696 | rasdani/github-patches | git_diff | carpentries__amy-475 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Admin commands broken
Please check all the admin commands (use `make` on its own to get a list):
```
$ make airports
/Users/gvwilson/anaconda/envs/py3k/lib/python3.3/site-packages/crispy_forms/utils.py:25: RemovedInDjango19Warning: memoize wrapper is deprecated and will be removed in Django 1.9. Use django.utils.lru_cache instead.
default_field_template = memoize(default_field_template, {}, 1)
Traceback (most recent call last):
File "manage.py", line 10, in <module>
execute_from_command_line(sys.argv)
File "/Users/gvwilson/anaconda/envs/py3k/lib/python3.3/site-packages/django/core/management/__init__.py", line 338, in execute_from_command_line
utility.execute()
File "/Users/gvwilson/anaconda/envs/py3k/lib/python3.3/site-packages/django/core/management/__init__.py", line 330, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/Users/gvwilson/anaconda/envs/py3k/lib/python3.3/site-packages/django/core/management/__init__.py", line 190, in fetch_command
klass = load_command_class(app_name, subcommand)
File "/Users/gvwilson/anaconda/envs/py3k/lib/python3.3/site-packages/django/core/management/__init__.py", line 40, in load_command_class
module = import_module('%s.management.commands.%s' % (app_name, name))
File "/Users/gvwilson/anaconda/envs/py3k/lib/python3.3/importlib/__init__.py", line 90, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 1582, in _gcd_import
File "<frozen importlib._bootstrap>", line 1563, in _find_and_load
File "<frozen importlib._bootstrap>", line 1530, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 584, in _check_name_wrapper
File "<frozen importlib._bootstrap>", line 1022, in load_module
File "<frozen importlib._bootstrap>", line 1003, in load_module
File "<frozen importlib._bootstrap>", line 560, in module_for_loader_wrapper
File "<frozen importlib._bootstrap>", line 868, in _load_module
File "<frozen importlib._bootstrap>", line 313, in _call_with_frames_removed
File "/Users/gvwilson/s/amy/workshops/management/commands/export_airports.py", line 3, in <module>
from workshops.views import _export_instructors
ImportError: cannot import name _export_instructors
make: *** [airports] Error 1
```
</issue>
<code>
[start of workshops/management/commands/export_airports.py]
1 import yaml
2 from django.core.management.base import BaseCommand, CommandError
3 from workshops.views import _export_instructors
4
5 class Command(BaseCommand):
6 args = 'no arguments'
7 help = 'Display YAML for airports.'
8
9 def handle(self, *args, **options):
10 print(yaml.dump(_export_instructors()).rstrip())
11
[end of workshops/management/commands/export_airports.py]
[start of workshops/management/commands/export_badges.py]
1 import yaml
2 from django.core.management.base import BaseCommand, CommandError
3 from workshops.views import _export_badges
4
5 class Command(BaseCommand):
6 args = 'no arguments'
7 help = 'Display YAML for badges.'
8
9 def handle(self, *args, **options):
10 print(yaml.dump(_export_badges()).rstrip())
11
[end of workshops/management/commands/export_badges.py]
[start of api/views.py]
1 from django.db.models import Q
2 from rest_framework.generics import ListAPIView
3 from rest_framework.permissions import IsAuthenticatedOrReadOnly
4 from rest_framework.response import Response
5 from rest_framework.reverse import reverse
6 from rest_framework.views import APIView
7
8 from workshops.models import Badge, Airport, Event
9
10 from .serializers import (
11 ExportBadgesSerializer,
12 ExportInstructorLocationsSerializer,
13 EventSerializer,
14 )
15
16
17 class ApiRoot(APIView):
18 def get(self, request, format=None):
19 return Response({
20 'export-badges': reverse('api:export-badges', request=request,
21 format=format),
22 'export-instructors': reverse('api:export-instructors',
23 request=request, format=format),
24 'events-published': reverse('api:events-published',
25 request=request, format=format),
26 })
27
28
29 class ExportBadgesView(APIView):
30 """List all badges and people who have them."""
31 permission_classes = (IsAuthenticatedOrReadOnly, )
32
33 def get(self, request, format=None):
34 badges = Badge.objects.prefetch_related('person_set')
35 serializer = ExportBadgesSerializer(badges, many=True)
36 return Response(serializer.data)
37
38
39 class ExportInstructorLocationsView(APIView):
40 """List all airports and instructors located near them."""
41 permission_classes = (IsAuthenticatedOrReadOnly, )
42
43 def get(self, request, format=None):
44 # TODO: return only people marked as instructors?
45 airports = Airport.objects.exclude(person=None) \
46 .prefetch_related('person_set')
47 serializer = ExportInstructorLocationsSerializer(airports, many=True)
48 return Response(serializer.data)
49
50
51 class ListEvents(APIView):
52 # I wanted to use ListAPIView, but it had problems with the way we test
53 # this code... Basically ListAPIView uses pagination, and pagination
54 # requires existing Request object - something we're faking in part of the
55 # tests (request = None).
56 serializer_class = EventSerializer
57 permission_classes = (IsAuthenticatedOrReadOnly, )
58 queryset = None # override this in the subclass
59
60 def get(self, request, format=None):
61 objects = self.queryset.all()
62 serializer = self.serializer_class(objects, many=True)
63 return Response(serializer.data)
64
65
66 class PublishedEvents(ListEvents):
67 # only events that have both a starting date and a URL
68 queryset = Event.objects.exclude(
69 Q(start__isnull=True) | Q(url__isnull=True)
70 ).order_by('-start')
71
[end of api/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/api/views.py b/api/views.py
--- a/api/views.py
+++ b/api/views.py
@@ -26,45 +26,31 @@
})
-class ExportBadgesView(APIView):
+class ExportBadgesView(ListAPIView):
"""List all badges and people who have them."""
permission_classes = (IsAuthenticatedOrReadOnly, )
+ paginator = None # disable pagination
- def get(self, request, format=None):
- badges = Badge.objects.prefetch_related('person_set')
- serializer = ExportBadgesSerializer(badges, many=True)
- return Response(serializer.data)
+ queryset = Badge.objects.prefetch_related('person_set')
+ serializer_class = ExportBadgesSerializer
-class ExportInstructorLocationsView(APIView):
+class ExportInstructorLocationsView(ListAPIView):
"""List all airports and instructors located near them."""
permission_classes = (IsAuthenticatedOrReadOnly, )
+ paginator = None # disable pagination
- def get(self, request, format=None):
- # TODO: return only people marked as instructors?
- airports = Airport.objects.exclude(person=None) \
- .prefetch_related('person_set')
- serializer = ExportInstructorLocationsSerializer(airports, many=True)
- return Response(serializer.data)
+ queryset = Airport.objects.exclude(person=None) \
+ .prefetch_related('person_set')
+ serializer_class = ExportInstructorLocationsSerializer
-class ListEvents(APIView):
- # I wanted to use ListAPIView, but it had problems with the way we test
- # this code... Basically ListAPIView uses pagination, and pagination
- # requires existing Request object - something we're faking in part of the
- # tests (request = None).
- serializer_class = EventSerializer
+class PublishedEvents(ListAPIView):
+ # only events that have both a starting date and a URL
permission_classes = (IsAuthenticatedOrReadOnly, )
- queryset = None # override this in the subclass
+ paginator = None # disable pagination
- def get(self, request, format=None):
- objects = self.queryset.all()
- serializer = self.serializer_class(objects, many=True)
- return Response(serializer.data)
-
-
-class PublishedEvents(ListEvents):
- # only events that have both a starting date and a URL
+ serializer_class = EventSerializer
queryset = Event.objects.exclude(
Q(start__isnull=True) | Q(url__isnull=True)
).order_by('-start')
diff --git a/workshops/management/commands/export_airports.py b/workshops/management/commands/export_airports.py
--- a/workshops/management/commands/export_airports.py
+++ b/workshops/management/commands/export_airports.py
@@ -1,10 +1,14 @@
-import yaml
-from django.core.management.base import BaseCommand, CommandError
-from workshops.views import _export_instructors
+from django.core.management.base import BaseCommand
+from django.core.urlresolvers import reverse
+from rest_framework.test import APIClient
+
class Command(BaseCommand):
args = 'no arguments'
help = 'Display YAML for airports.'
def handle(self, *args, **options):
- print(yaml.dump(_export_instructors()).rstrip())
+ client = APIClient()
+ response = client.get(reverse('api:export-instructors'),
+ {'format': 'yaml'})
+ print(response.content.decode('utf-8'))
diff --git a/workshops/management/commands/export_badges.py b/workshops/management/commands/export_badges.py
--- a/workshops/management/commands/export_badges.py
+++ b/workshops/management/commands/export_badges.py
@@ -1,10 +1,14 @@
-import yaml
-from django.core.management.base import BaseCommand, CommandError
-from workshops.views import _export_badges
+from django.core.management.base import BaseCommand
+from django.core.urlresolvers import reverse
+from rest_framework.test import APIClient
+
class Command(BaseCommand):
args = 'no arguments'
help = 'Display YAML for badges.'
def handle(self, *args, **options):
- print(yaml.dump(_export_badges()).rstrip())
+ client = APIClient()
+ response = client.get(reverse('api:export-badges'),
+ {'format': 'yaml'})
+ print(response.content.decode('utf-8'))
| {"golden_diff": "diff --git a/api/views.py b/api/views.py\n--- a/api/views.py\n+++ b/api/views.py\n@@ -26,45 +26,31 @@\n })\n \n \n-class ExportBadgesView(APIView):\n+class ExportBadgesView(ListAPIView):\n \"\"\"List all badges and people who have them.\"\"\"\n permission_classes = (IsAuthenticatedOrReadOnly, )\n+ paginator = None # disable pagination\n \n- def get(self, request, format=None):\n- badges = Badge.objects.prefetch_related('person_set')\n- serializer = ExportBadgesSerializer(badges, many=True)\n- return Response(serializer.data)\n+ queryset = Badge.objects.prefetch_related('person_set')\n+ serializer_class = ExportBadgesSerializer\n \n \n-class ExportInstructorLocationsView(APIView):\n+class ExportInstructorLocationsView(ListAPIView):\n \"\"\"List all airports and instructors located near them.\"\"\"\n permission_classes = (IsAuthenticatedOrReadOnly, )\n+ paginator = None # disable pagination\n \n- def get(self, request, format=None):\n- # TODO: return only people marked as instructors?\n- airports = Airport.objects.exclude(person=None) \\\n- .prefetch_related('person_set')\n- serializer = ExportInstructorLocationsSerializer(airports, many=True)\n- return Response(serializer.data)\n+ queryset = Airport.objects.exclude(person=None) \\\n+ .prefetch_related('person_set')\n+ serializer_class = ExportInstructorLocationsSerializer\n \n \n-class ListEvents(APIView):\n- # I wanted to use ListAPIView, but it had problems with the way we test\n- # this code... Basically ListAPIView uses pagination, and pagination\n- # requires existing Request object - something we're faking in part of the\n- # tests (request = None).\n- serializer_class = EventSerializer\n+class PublishedEvents(ListAPIView):\n+ # only events that have both a starting date and a URL\n permission_classes = (IsAuthenticatedOrReadOnly, )\n- queryset = None # override this in the subclass\n+ paginator = None # disable pagination\n \n- def get(self, request, format=None):\n- objects = self.queryset.all()\n- serializer = self.serializer_class(objects, many=True)\n- return Response(serializer.data)\n-\n-\n-class PublishedEvents(ListEvents):\n- # only events that have both a starting date and a URL\n+ serializer_class = EventSerializer\n queryset = Event.objects.exclude(\n Q(start__isnull=True) | Q(url__isnull=True)\n ).order_by('-start')\ndiff --git a/workshops/management/commands/export_airports.py b/workshops/management/commands/export_airports.py\n--- a/workshops/management/commands/export_airports.py\n+++ b/workshops/management/commands/export_airports.py\n@@ -1,10 +1,14 @@\n-import yaml\n-from django.core.management.base import BaseCommand, CommandError\n-from workshops.views import _export_instructors\n+from django.core.management.base import BaseCommand\n+from django.core.urlresolvers import reverse\n+from rest_framework.test import APIClient\n+\n \n class Command(BaseCommand):\n args = 'no arguments'\n help = 'Display YAML for airports.'\n \n def handle(self, *args, **options):\n- print(yaml.dump(_export_instructors()).rstrip())\n+ client = APIClient()\n+ response = client.get(reverse('api:export-instructors'),\n+ {'format': 'yaml'})\n+ print(response.content.decode('utf-8'))\ndiff --git a/workshops/management/commands/export_badges.py b/workshops/management/commands/export_badges.py\n--- a/workshops/management/commands/export_badges.py\n+++ b/workshops/management/commands/export_badges.py\n@@ -1,10 +1,14 @@\n-import yaml\n-from django.core.management.base import BaseCommand, CommandError\n-from workshops.views 
import _export_badges\n+from django.core.management.base import BaseCommand\n+from django.core.urlresolvers import reverse\n+from rest_framework.test import APIClient\n+\n \n class Command(BaseCommand):\n args = 'no arguments'\n help = 'Display YAML for badges.'\n \n def handle(self, *args, **options):\n- print(yaml.dump(_export_badges()).rstrip())\n+ client = APIClient()\n+ response = client.get(reverse('api:export-badges'),\n+ {'format': 'yaml'})\n+ print(response.content.decode('utf-8'))\n", "issue": "Admin commands broken\nPlease check all the admin commands (use `make` on its own to get a list):\n\n```\n$ make airports\n\n/Users/gvwilson/anaconda/envs/py3k/lib/python3.3/site-packages/crispy_forms/utils.py:25: RemovedInDjango19Warning: memoize wrapper is deprecated and will be removed in Django 1.9. Use django.utils.lru_cache instead.\n default_field_template = memoize(default_field_template, {}, 1)\n\nTraceback (most recent call last):\n File \"manage.py\", line 10, in <module>\n execute_from_command_line(sys.argv)\n File \"/Users/gvwilson/anaconda/envs/py3k/lib/python3.3/site-packages/django/core/management/__init__.py\", line 338, in execute_from_command_line\n utility.execute()\n File \"/Users/gvwilson/anaconda/envs/py3k/lib/python3.3/site-packages/django/core/management/__init__.py\", line 330, in execute\n self.fetch_command(subcommand).run_from_argv(self.argv)\n File \"/Users/gvwilson/anaconda/envs/py3k/lib/python3.3/site-packages/django/core/management/__init__.py\", line 190, in fetch_command\n klass = load_command_class(app_name, subcommand)\n File \"/Users/gvwilson/anaconda/envs/py3k/lib/python3.3/site-packages/django/core/management/__init__.py\", line 40, in load_command_class\n module = import_module('%s.management.commands.%s' % (app_name, name))\n File \"/Users/gvwilson/anaconda/envs/py3k/lib/python3.3/importlib/__init__.py\", line 90, in import_module\n return _bootstrap._gcd_import(name[level:], package, level)\n File \"<frozen importlib._bootstrap>\", line 1582, in _gcd_import\n File \"<frozen importlib._bootstrap>\", line 1563, in _find_and_load\n File \"<frozen importlib._bootstrap>\", line 1530, in _find_and_load_unlocked\n File \"<frozen importlib._bootstrap>\", line 584, in _check_name_wrapper\n File \"<frozen importlib._bootstrap>\", line 1022, in load_module\n File \"<frozen importlib._bootstrap>\", line 1003, in load_module\n File \"<frozen importlib._bootstrap>\", line 560, in module_for_loader_wrapper\n File \"<frozen importlib._bootstrap>\", line 868, in _load_module\n File \"<frozen importlib._bootstrap>\", line 313, in _call_with_frames_removed\n File \"/Users/gvwilson/s/amy/workshops/management/commands/export_airports.py\", line 3, in <module>\n from workshops.views import _export_instructors\nImportError: cannot import name _export_instructors\nmake: *** [airports] Error 1\n```\n\n", "before_files": [{"content": "import yaml\nfrom django.core.management.base import BaseCommand, CommandError\nfrom workshops.views import _export_instructors\n\nclass Command(BaseCommand):\n args = 'no arguments'\n help = 'Display YAML for airports.'\n\n def handle(self, *args, **options):\n print(yaml.dump(_export_instructors()).rstrip())\n", "path": "workshops/management/commands/export_airports.py"}, {"content": "import yaml\nfrom django.core.management.base import BaseCommand, CommandError\nfrom workshops.views import _export_badges\n\nclass Command(BaseCommand):\n args = 'no arguments'\n help = 'Display YAML for badges.'\n\n def handle(self, *args, **options):\n 
print(yaml.dump(_export_badges()).rstrip())\n", "path": "workshops/management/commands/export_badges.py"}, {"content": "from django.db.models import Q\nfrom rest_framework.generics import ListAPIView\nfrom rest_framework.permissions import IsAuthenticatedOrReadOnly\nfrom rest_framework.response import Response\nfrom rest_framework.reverse import reverse\nfrom rest_framework.views import APIView\n\nfrom workshops.models import Badge, Airport, Event\n\nfrom .serializers import (\n ExportBadgesSerializer,\n ExportInstructorLocationsSerializer,\n EventSerializer,\n)\n\n\nclass ApiRoot(APIView):\n def get(self, request, format=None):\n return Response({\n 'export-badges': reverse('api:export-badges', request=request,\n format=format),\n 'export-instructors': reverse('api:export-instructors',\n request=request, format=format),\n 'events-published': reverse('api:events-published',\n request=request, format=format),\n })\n\n\nclass ExportBadgesView(APIView):\n \"\"\"List all badges and people who have them.\"\"\"\n permission_classes = (IsAuthenticatedOrReadOnly, )\n\n def get(self, request, format=None):\n badges = Badge.objects.prefetch_related('person_set')\n serializer = ExportBadgesSerializer(badges, many=True)\n return Response(serializer.data)\n\n\nclass ExportInstructorLocationsView(APIView):\n \"\"\"List all airports and instructors located near them.\"\"\"\n permission_classes = (IsAuthenticatedOrReadOnly, )\n\n def get(self, request, format=None):\n # TODO: return only people marked as instructors?\n airports = Airport.objects.exclude(person=None) \\\n .prefetch_related('person_set')\n serializer = ExportInstructorLocationsSerializer(airports, many=True)\n return Response(serializer.data)\n\n\nclass ListEvents(APIView):\n # I wanted to use ListAPIView, but it had problems with the way we test\n # this code... Basically ListAPIView uses pagination, and pagination\n # requires existing Request object - something we're faking in part of the\n # tests (request = None).\n serializer_class = EventSerializer\n permission_classes = (IsAuthenticatedOrReadOnly, )\n queryset = None # override this in the subclass\n\n def get(self, request, format=None):\n objects = self.queryset.all()\n serializer = self.serializer_class(objects, many=True)\n return Response(serializer.data)\n\n\nclass PublishedEvents(ListEvents):\n # only events that have both a starting date and a URL\n queryset = Event.objects.exclude(\n Q(start__isnull=True) | Q(url__isnull=True)\n ).order_by('-start')\n", "path": "api/views.py"}]} | 2,080 | 954 |
gh_patches_debug_13993 | rasdani/github-patches | git_diff | google__flax-1324 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AttributeError: module 'flax.linen' has no attribute 'merge_param'
[This guide](https://flax.readthedocs.io/en/latest/design_notes/arguments.html) suggests using `nn.merge_param` to combine arguments, but `merge_param` is only available through `nn.module.merge_param`. I believe it needs to be added to the import line [here](https://github.com/google/flax/blob/4ae9143f7ef46ffab6d9123ba1b2e4f3303e68d1/flax/linen/__init__.py#L28). I can open a PR if this is the case.
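For context, the usage the design note recommends looks roughly like this (an illustrative sketch — `Block` and its fields are made up; only `merge_param` itself comes from the guide). With the current exports it only works if spelled `nn.module.merge_param`:

```python
from typing import Optional
import flax.linen as nn

class Block(nn.Module):
    rate: float
    deterministic: Optional[bool] = None  # may be fixed at construction time

    @nn.compact
    def __call__(self, x, deterministic: Optional[bool] = None):
        # merge_param picks whichever of the two values is not None
        # (passing neither is an error).
        deterministic = nn.merge_param(
            "deterministic", self.deterministic, deterministic)
        return nn.Dropout(rate=self.rate)(x, deterministic=deterministic)
```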
</issue>
<code>
[start of flax/linen/__init__.py]
1 # Copyright 2021 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """The Flax Module system."""
16
17
18 # pylint: disable=g-multiple-import
19 # re-export commonly used modules and functions
20 from .activation import (celu, elu, gelu, glu, leaky_relu, log_sigmoid,
21 log_softmax, relu, sigmoid, soft_sign, softmax,
22 softplus, swish, silu, tanh)
23 from .attention import (MultiHeadDotProductAttention, SelfAttention,
24 dot_product_attention, make_attention_mask,
25 make_causal_mask, combine_masks)
26 from ..core import broadcast, DenyList
27 from .linear import Conv, ConvTranspose, Dense, DenseGeneral, Embed
28 from .module import Module, compact, enable_named_call, disable_named_call, Variable, init, init_with_output, apply
29 from .normalization import BatchNorm, GroupNorm, LayerNorm
30 from .pooling import avg_pool, max_pool
31 from .recurrent import GRUCell, LSTMCell, ConvLSTM, OptimizedLSTMCell
32 from .stochastic import Dropout
33 from .transforms import jit, named_call, remat, scan, vmap
34 from .initializers import zeros, ones
35
36 # pylint: enable=g-multiple-import
37
[end of flax/linen/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/flax/linen/__init__.py b/flax/linen/__init__.py
--- a/flax/linen/__init__.py
+++ b/flax/linen/__init__.py
@@ -25,7 +25,8 @@
make_causal_mask, combine_masks)
from ..core import broadcast, DenyList
from .linear import Conv, ConvTranspose, Dense, DenseGeneral, Embed
-from .module import Module, compact, enable_named_call, disable_named_call, Variable, init, init_with_output, apply
+from .module import (Module, compact, enable_named_call, disable_named_call,
+ Variable, init, init_with_output, apply, merge_param)
from .normalization import BatchNorm, GroupNorm, LayerNorm
from .pooling import avg_pool, max_pool
from .recurrent import GRUCell, LSTMCell, ConvLSTM, OptimizedLSTMCell
| {"golden_diff": "diff --git a/flax/linen/__init__.py b/flax/linen/__init__.py\n--- a/flax/linen/__init__.py\n+++ b/flax/linen/__init__.py\n@@ -25,7 +25,8 @@\n make_causal_mask, combine_masks)\n from ..core import broadcast, DenyList\n from .linear import Conv, ConvTranspose, Dense, DenseGeneral, Embed\n-from .module import Module, compact, enable_named_call, disable_named_call, Variable, init, init_with_output, apply\n+from .module import (Module, compact, enable_named_call, disable_named_call,\n+ Variable, init, init_with_output, apply, merge_param)\n from .normalization import BatchNorm, GroupNorm, LayerNorm\n from .pooling import avg_pool, max_pool\n from .recurrent import GRUCell, LSTMCell, ConvLSTM, OptimizedLSTMCell\n", "issue": "AttributeError: module 'flax.linen' has no attribute 'merge_param'\n[This guide](https://flax.readthedocs.io/en/latest/design_notes/arguments.html) suggests using `nn.merge_param` to combine arguments, but `merge_param` is only available through `nn.module.merge_param`. I believe it needs to be added to the import line [here](https://github.com/google/flax/blob/4ae9143f7ef46ffab6d9123ba1b2e4f3303e68d1/flax/linen/__init__.py#L28). I can open a PR if this is the case.\r\n\n", "before_files": [{"content": "# Copyright 2021 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"The Flax Module system.\"\"\"\n\n\n# pylint: disable=g-multiple-import\n# re-export commonly used modules and functions\nfrom .activation import (celu, elu, gelu, glu, leaky_relu, log_sigmoid,\n log_softmax, relu, sigmoid, soft_sign, softmax,\n softplus, swish, silu, tanh)\nfrom .attention import (MultiHeadDotProductAttention, SelfAttention,\n dot_product_attention, make_attention_mask,\n make_causal_mask, combine_masks)\nfrom ..core import broadcast, DenyList\nfrom .linear import Conv, ConvTranspose, Dense, DenseGeneral, Embed\nfrom .module import Module, compact, enable_named_call, disable_named_call, Variable, init, init_with_output, apply\nfrom .normalization import BatchNorm, GroupNorm, LayerNorm\nfrom .pooling import avg_pool, max_pool\nfrom .recurrent import GRUCell, LSTMCell, ConvLSTM, OptimizedLSTMCell\nfrom .stochastic import Dropout\nfrom .transforms import jit, named_call, remat, scan, vmap\nfrom .initializers import zeros, ones\n\n# pylint: enable=g-multiple-import\n", "path": "flax/linen/__init__.py"}]} | 1,140 | 201 |
gh_patches_debug_18718 | rasdani/github-patches | git_diff | cisagov__manage.get.gov-1747 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Investigate Health check restarting sandboxes
### Issue Description
We noticed a few times in both the staging and stable environments that the sandboxes appeared to restart even when no one was on them. We weren't able to track down useful information in the logs besides a reference to a health check being performed. Then, when the EPP connection was added, we saw this same restarting happen often; in fact, just trying to set nameservers alone could result in multiple resets. However, it was very sporadic and without a discernible pattern beyond "when EPP calls are happening". It was determined that the system was failing the health check, which does result in a restart. The solution was to increase the timeout on the health check, but that seems like a patch over the symptom rather than the root cause.
AC:
- [ ] investigate the health check that is currently happening and whether the restarts could be caused by some piece of our architecture (such as a port that is in use and blocking the health check request)
- [ ] make follow-on tickets as needed or document findings here before closing this ticket
### Additional Context (optional)
Cloud Foundry has a customer support Slack that should be used to get more info on the health check.
To force the health check to occur with some frequency, lower the healthcheck timeout to 1 second.
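If the sandbox's Cloud Foundry health check is an HTTP check pointed at the registrar's `health/` route (an assumption — it may be a plain port or process check instead), that handler needs to answer quickly and must not wait on EPP, the database, or anything else that can block. A minimal sketch of such a handler (illustrative only; the real `views.health` implementation is not shown in this ticket):

```python
# Hypothetical minimal health endpoint: respond immediately, without
# touching EPP, the registry, or any other call that can block.
from django.http import HttpResponse

def health(request):
    return HttpResponse("OK")
```

If the check is a port check instead, a long-running EPP call tying up the worker could still cause the probe to time out, which would line up with the "when EPP calls are happening" pattern described above.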
### Issue Links
May be partially mitigated by #1151
</issue>
<code>
[start of src/djangooidc/views.py]
1 # coding: utf-8
2
3 import logging
4
5 from django.conf import settings
6 from django.contrib.auth import logout as auth_logout
7 from django.contrib.auth import authenticate, login
8 from django.http import HttpResponseRedirect
9 from django.shortcuts import redirect, render
10 from urllib.parse import parse_qs, urlencode
11
12 from djangooidc.oidc import Client
13 from djangooidc import exceptions as o_e
14 from registrar.models import User
15
16 logger = logging.getLogger(__name__)
17
18 try:
19 # Initialize provider using pyOICD
20 OP = getattr(settings, "OIDC_ACTIVE_PROVIDER")
21 CLIENT = Client(OP)
22 logger.debug("client initialized %s" % CLIENT)
23 except Exception as err:
24 CLIENT = None # type: ignore
25 logger.warning(err)
26 logger.warning("Unable to configure OpenID Connect provider. Users cannot log in.")
27
28
29 def error_page(request, error):
30 """Display a sensible message and log the error."""
31 logger.error(error)
32 if isinstance(error, o_e.AuthenticationFailed):
33 return render(
34 request,
35 "401.html",
36 context={
37 "friendly_message": error.friendly_message,
38 "log_identifier": error.locator,
39 },
40 status=401,
41 )
42 if isinstance(error, o_e.InternalError):
43 return render(
44 request,
45 "500.html",
46 context={
47 "friendly_message": error.friendly_message,
48 "log_identifier": error.locator,
49 },
50 status=500,
51 )
52 if isinstance(error, Exception):
53 return render(request, "500.html", status=500)
54
55
56 def openid(request):
57 """Redirect the user to an authentication provider (OP)."""
58
59 # If the session reset because of a server restart, attempt to login again
60 request.session["acr_value"] = CLIENT.get_default_acr_value()
61
62 request.session["next"] = request.GET.get("next", "/")
63
64 try:
65 return CLIENT.create_authn_request(request.session)
66 except Exception as err:
67 return error_page(request, err)
68
69
70 def login_callback(request):
71 """Analyze the token returned by the authentication provider (OP)."""
72 try:
73 query = parse_qs(request.GET.urlencode())
74 userinfo = CLIENT.callback(query, request.session)
75 # test for need for identity verification and if it is satisfied
76 # if not satisfied, redirect user to login with stepped up acr_value
77 if requires_step_up_auth(userinfo):
78 # add acr_value to request.session
79 request.session["acr_value"] = CLIENT.get_step_up_acr_value()
80 return CLIENT.create_authn_request(request.session)
81 user = authenticate(request=request, **userinfo)
82 if user:
83 login(request, user)
84 logger.info("Successfully logged in user %s" % user)
85 # Double login bug (1507)?
86 return redirect(request.session.get("next", "/"))
87 else:
88 raise o_e.BannedUser()
89 except o_e.NoStateDefined as nsd_err:
90 logger.warning(f"No State Defined: {nsd_err}")
91 return redirect(request.session.get("next", "/"))
92 except Exception as err:
93 return error_page(request, err)
94
95
96 def requires_step_up_auth(userinfo):
97 """if User.needs_identity_verification and step_up_acr_value not in
98 ial returned from callback, return True"""
99 step_up_acr_value = CLIENT.get_step_up_acr_value()
100 acr_value = userinfo.get("ial", "")
101 uuid = userinfo.get("sub", "")
102 email = userinfo.get("email", "")
103 if acr_value != step_up_acr_value:
104 # The acr of this attempt is not at the highest level
105 # so check if the user needs the higher level
106 return User.needs_identity_verification(email, uuid)
107 else:
108 # This attempt already came back at the highest level
109 # so does not require step up
110 return False
111
112
113 def logout(request, next_page=None):
114 """Redirect the user to the authentication provider (OP) logout page."""
115 try:
116 user = request.user
117 request_args = {
118 "client_id": CLIENT.client_id,
119 "state": request.session["state"],
120 }
121 if (
122 "post_logout_redirect_uris" in CLIENT.registration_response.keys()
123 and len(CLIENT.registration_response["post_logout_redirect_uris"]) > 0
124 ):
125 request_args.update(
126 {"post_logout_redirect_uri": CLIENT.registration_response["post_logout_redirect_uris"][0]}
127 )
128 url = CLIENT.provider_info["end_session_endpoint"]
129 url += "?" + urlencode(request_args)
130 return HttpResponseRedirect(url)
131 except Exception as err:
132 return error_page(request, err)
133 finally:
134 # Always remove Django session stuff - even if not logged out from OP.
135 # Don't wait for the callback as it may never come.
136 auth_logout(request)
137 logger.info("Successfully logged out user %s" % user)
138 next_page = getattr(settings, "LOGOUT_REDIRECT_URL", None)
139 if next_page:
140 request.session["next"] = next_page
141
142
143 def logout_callback(request):
144 """Simple redirection view: after logout, redirect to `next`."""
145 next = request.session.get("next", "/")
146 return redirect(next)
147
[end of src/djangooidc/views.py]
[start of src/registrar/config/urls.py]
1 """URL Configuration
2
3 For more information see:
4 https://docs.djangoproject.com/en/4.0/topics/http/urls/
5 """
6
7 from django.contrib import admin
8 from django.urls import include, path
9 from django.views.generic import RedirectView
10
11 from registrar import views
12
13 from registrar.views.admin_views import ExportData
14
15
16 from registrar.views.application import Step
17 from registrar.views.utility import always_404
18 from api.views import available, get_current_federal, get_current_full
19
20
21 APPLICATION_NAMESPACE = views.ApplicationWizard.URL_NAMESPACE
22 application_urls = [
23 path("", views.ApplicationWizard.as_view(), name=""),
24 path("finished/", views.Finished.as_view(), name="finished"),
25 ]
26
27 # dynamically generate the other application_urls
28 for step, view in [
29 # add/remove steps here
30 (Step.ORGANIZATION_TYPE, views.OrganizationType),
31 (Step.TRIBAL_GOVERNMENT, views.TribalGovernment),
32 (Step.ORGANIZATION_FEDERAL, views.OrganizationFederal),
33 (Step.ORGANIZATION_ELECTION, views.OrganizationElection),
34 (Step.ORGANIZATION_CONTACT, views.OrganizationContact),
35 (Step.ABOUT_YOUR_ORGANIZATION, views.AboutYourOrganization),
36 (Step.AUTHORIZING_OFFICIAL, views.AuthorizingOfficial),
37 (Step.CURRENT_SITES, views.CurrentSites),
38 (Step.DOTGOV_DOMAIN, views.DotgovDomain),
39 (Step.PURPOSE, views.Purpose),
40 (Step.YOUR_CONTACT, views.YourContact),
41 (Step.OTHER_CONTACTS, views.OtherContacts),
42 (Step.ANYTHING_ELSE, views.AnythingElse),
43 (Step.REQUIREMENTS, views.Requirements),
44 (Step.REVIEW, views.Review),
45 ]:
46 application_urls.append(path(f"{step}/", view.as_view(), name=step))
47
48
49 urlpatterns = [
50 path("", views.index, name="home"),
51 path(
52 "admin/logout/",
53 RedirectView.as_view(pattern_name="logout", permanent=False),
54 ),
55 path("export_data/", ExportData.as_view(), name="admin_export_data"),
56 path("admin/", admin.site.urls),
57 path(
58 "application/<id>/edit/",
59 views.ApplicationWizard.as_view(),
60 name=views.ApplicationWizard.EDIT_URL_NAME,
61 ),
62 path(
63 "application/<int:pk>",
64 views.ApplicationStatus.as_view(),
65 name="application-status",
66 ),
67 path(
68 "application/<int:pk>/withdraw",
69 views.ApplicationWithdrawConfirmation.as_view(),
70 name="application-withdraw-confirmation",
71 ),
72 path(
73 "application/<int:pk>/withdrawconfirmed",
74 views.ApplicationWithdrawn.as_view(),
75 name="application-withdrawn",
76 ),
77 path("health/", views.health),
78 path("openid/", include("djangooidc.urls")),
79 path("request/", include((application_urls, APPLICATION_NAMESPACE))),
80 path("api/v1/available/", available, name="available"),
81 path("api/v1/get-report/current-federal", get_current_federal, name="get-current-federal"),
82 path("api/v1/get-report/current-full", get_current_full, name="get-current-full"),
83 path(
84 "todo",
85 lambda r: always_404(r, "We forgot to include this link, sorry."),
86 name="todo",
87 ),
88 path("domain/<int:pk>", views.DomainView.as_view(), name="domain"),
89 path("domain/<int:pk>/users", views.DomainUsersView.as_view(), name="domain-users"),
90 path(
91 "domain/<int:pk>/dns",
92 views.DomainDNSView.as_view(),
93 name="domain-dns",
94 ),
95 path(
96 "domain/<int:pk>/dns/nameservers",
97 views.DomainNameserversView.as_view(),
98 name="domain-dns-nameservers",
99 ),
100 path(
101 "domain/<int:pk>/dns/dnssec",
102 views.DomainDNSSECView.as_view(),
103 name="domain-dns-dnssec",
104 ),
105 path(
106 "domain/<int:pk>/dns/dnssec/dsdata",
107 views.DomainDsDataView.as_view(),
108 name="domain-dns-dnssec-dsdata",
109 ),
110 path(
111 "domain/<int:pk>/your-contact-information",
112 views.DomainYourContactInformationView.as_view(),
113 name="domain-your-contact-information",
114 ),
115 path(
116 "domain/<int:pk>/org-name-address",
117 views.DomainOrgNameAddressView.as_view(),
118 name="domain-org-name-address",
119 ),
120 path(
121 "domain/<int:pk>/authorizing-official",
122 views.DomainAuthorizingOfficialView.as_view(),
123 name="domain-authorizing-official",
124 ),
125 path(
126 "domain/<int:pk>/security-email",
127 views.DomainSecurityEmailView.as_view(),
128 name="domain-security-email",
129 ),
130 path(
131 "domain/<int:pk>/users/add",
132 views.DomainAddUserView.as_view(),
133 name="domain-users-add",
134 ),
135 path(
136 "invitation/<int:pk>/delete",
137 views.DomainInvitationDeleteView.as_view(http_method_names=["post"]),
138 name="invitation-delete",
139 ),
140 path(
141 "application/<int:pk>/delete",
142 views.DomainApplicationDeleteView.as_view(http_method_names=["post"]),
143 name="application-delete",
144 ),
145 path(
146 "domain/<int:pk>/users/<int:user_pk>/delete",
147 views.DomainDeleteUserView.as_view(http_method_names=["post"]),
148 name="domain-user-delete",
149 ),
150 ]
151
152 # we normally would guard these with `if settings.DEBUG` but tests run with
153 # DEBUG = False even when these apps have been loaded because settings.DEBUG
154 # was actually True. Instead, let's add these URLs any time we are able to
155 # import the debug toolbar package.
156 try:
157 import debug_toolbar # type: ignore
158
159 urlpatterns += [path("__debug__/", include(debug_toolbar.urls))]
160 except ImportError:
161 pass
162
[end of src/registrar/config/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/djangooidc/views.py b/src/djangooidc/views.py
--- a/src/djangooidc/views.py
+++ b/src/djangooidc/views.py
@@ -55,7 +55,6 @@
def openid(request):
"""Redirect the user to an authentication provider (OP)."""
-
# If the session reset because of a server restart, attempt to login again
request.session["acr_value"] = CLIENT.get_default_acr_value()
diff --git a/src/registrar/config/urls.py b/src/registrar/config/urls.py
--- a/src/registrar/config/urls.py
+++ b/src/registrar/config/urls.py
@@ -74,7 +74,7 @@
views.ApplicationWithdrawn.as_view(),
name="application-withdrawn",
),
- path("health/", views.health),
+ path("health", views.health, name="health"),
path("openid/", include("djangooidc.urls")),
path("request/", include((application_urls, APPLICATION_NAMESPACE))),
path("api/v1/available/", available, name="available"),
| {"golden_diff": "diff --git a/src/djangooidc/views.py b/src/djangooidc/views.py\n--- a/src/djangooidc/views.py\n+++ b/src/djangooidc/views.py\n@@ -55,7 +55,6 @@\n \n def openid(request):\n \"\"\"Redirect the user to an authentication provider (OP).\"\"\"\n-\n # If the session reset because of a server restart, attempt to login again\n request.session[\"acr_value\"] = CLIENT.get_default_acr_value()\n \ndiff --git a/src/registrar/config/urls.py b/src/registrar/config/urls.py\n--- a/src/registrar/config/urls.py\n+++ b/src/registrar/config/urls.py\n@@ -74,7 +74,7 @@\n views.ApplicationWithdrawn.as_view(),\n name=\"application-withdrawn\",\n ),\n- path(\"health/\", views.health),\n+ path(\"health\", views.health, name=\"health\"),\n path(\"openid/\", include(\"djangooidc.urls\")),\n path(\"request/\", include((application_urls, APPLICATION_NAMESPACE))),\n path(\"api/v1/available/\", available, name=\"available\"),\n", "issue": "Investigate Health check restarting sandboxes \n### Issue Description\n\nWe noticed a few times in both staging and stable environments that the sandboxes appeared to restart even when no one was on them. We weren't able to track down useful information in logs to help assist besides a reference to a health check being performed. Then when EPP connection was added, we saw this same restarting happening often, in fact just trying to set nameservers alone could result in multiple resets. However, this was very sporadic and without a disernable pattern besides \"when epp calls are happening\". It was determined that the system was failing the health check which does result in a restart. The solution was to increase the timeout on the health check, but that seems like a patch to the problem.\n\nAC:\n\n- [ ] investigate the health check that is currently happening and investigate if this could be resulting by some piece of our architecture (such as a port that is being used and is blocking the health check request)\n- [ ] make follow on tickets as needed or document findings here before closing ticket\n\n### Additional Context (optional)\n\nCloudfoundry has a customer support slack that should be used to get more info on the healthcheck.\n\nTo force the health check to occur with some frequency, lower the healthcheck timeout to 1 second.\n\n### Issue Links\n\nMay be partially mitigated by #1151 \n", "before_files": [{"content": "# coding: utf-8\n\nimport logging\n\nfrom django.conf import settings\nfrom django.contrib.auth import logout as auth_logout\nfrom django.contrib.auth import authenticate, login\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import redirect, render\nfrom urllib.parse import parse_qs, urlencode\n\nfrom djangooidc.oidc import Client\nfrom djangooidc import exceptions as o_e\nfrom registrar.models import User\n\nlogger = logging.getLogger(__name__)\n\ntry:\n # Initialize provider using pyOICD\n OP = getattr(settings, \"OIDC_ACTIVE_PROVIDER\")\n CLIENT = Client(OP)\n logger.debug(\"client initialized %s\" % CLIENT)\nexcept Exception as err:\n CLIENT = None # type: ignore\n logger.warning(err)\n logger.warning(\"Unable to configure OpenID Connect provider. 
Users cannot log in.\")\n\n\ndef error_page(request, error):\n \"\"\"Display a sensible message and log the error.\"\"\"\n logger.error(error)\n if isinstance(error, o_e.AuthenticationFailed):\n return render(\n request,\n \"401.html\",\n context={\n \"friendly_message\": error.friendly_message,\n \"log_identifier\": error.locator,\n },\n status=401,\n )\n if isinstance(error, o_e.InternalError):\n return render(\n request,\n \"500.html\",\n context={\n \"friendly_message\": error.friendly_message,\n \"log_identifier\": error.locator,\n },\n status=500,\n )\n if isinstance(error, Exception):\n return render(request, \"500.html\", status=500)\n\n\ndef openid(request):\n \"\"\"Redirect the user to an authentication provider (OP).\"\"\"\n\n # If the session reset because of a server restart, attempt to login again\n request.session[\"acr_value\"] = CLIENT.get_default_acr_value()\n\n request.session[\"next\"] = request.GET.get(\"next\", \"/\")\n\n try:\n return CLIENT.create_authn_request(request.session)\n except Exception as err:\n return error_page(request, err)\n\n\ndef login_callback(request):\n \"\"\"Analyze the token returned by the authentication provider (OP).\"\"\"\n try:\n query = parse_qs(request.GET.urlencode())\n userinfo = CLIENT.callback(query, request.session)\n # test for need for identity verification and if it is satisfied\n # if not satisfied, redirect user to login with stepped up acr_value\n if requires_step_up_auth(userinfo):\n # add acr_value to request.session\n request.session[\"acr_value\"] = CLIENT.get_step_up_acr_value()\n return CLIENT.create_authn_request(request.session)\n user = authenticate(request=request, **userinfo)\n if user:\n login(request, user)\n logger.info(\"Successfully logged in user %s\" % user)\n # Double login bug (1507)?\n return redirect(request.session.get(\"next\", \"/\"))\n else:\n raise o_e.BannedUser()\n except o_e.NoStateDefined as nsd_err:\n logger.warning(f\"No State Defined: {nsd_err}\")\n return redirect(request.session.get(\"next\", \"/\"))\n except Exception as err:\n return error_page(request, err)\n\n\ndef requires_step_up_auth(userinfo):\n \"\"\"if User.needs_identity_verification and step_up_acr_value not in\n ial returned from callback, return True\"\"\"\n step_up_acr_value = CLIENT.get_step_up_acr_value()\n acr_value = userinfo.get(\"ial\", \"\")\n uuid = userinfo.get(\"sub\", \"\")\n email = userinfo.get(\"email\", \"\")\n if acr_value != step_up_acr_value:\n # The acr of this attempt is not at the highest level\n # so check if the user needs the higher level\n return User.needs_identity_verification(email, uuid)\n else:\n # This attempt already came back at the highest level\n # so does not require step up\n return False\n\n\ndef logout(request, next_page=None):\n \"\"\"Redirect the user to the authentication provider (OP) logout page.\"\"\"\n try:\n user = request.user\n request_args = {\n \"client_id\": CLIENT.client_id,\n \"state\": request.session[\"state\"],\n }\n if (\n \"post_logout_redirect_uris\" in CLIENT.registration_response.keys()\n and len(CLIENT.registration_response[\"post_logout_redirect_uris\"]) > 0\n ):\n request_args.update(\n {\"post_logout_redirect_uri\": CLIENT.registration_response[\"post_logout_redirect_uris\"][0]}\n )\n url = CLIENT.provider_info[\"end_session_endpoint\"]\n url += \"?\" + urlencode(request_args)\n return HttpResponseRedirect(url)\n except Exception as err:\n return error_page(request, err)\n finally:\n # Always remove Django session stuff - even if not logged out from OP.\n # 
Don't wait for the callback as it may never come.\n auth_logout(request)\n logger.info(\"Successfully logged out user %s\" % user)\n next_page = getattr(settings, \"LOGOUT_REDIRECT_URL\", None)\n if next_page:\n request.session[\"next\"] = next_page\n\n\ndef logout_callback(request):\n \"\"\"Simple redirection view: after logout, redirect to `next`.\"\"\"\n next = request.session.get(\"next\", \"/\")\n return redirect(next)\n", "path": "src/djangooidc/views.py"}, {"content": "\"\"\"URL Configuration\n\nFor more information see:\n https://docs.djangoproject.com/en/4.0/topics/http/urls/\n\"\"\"\n\nfrom django.contrib import admin\nfrom django.urls import include, path\nfrom django.views.generic import RedirectView\n\nfrom registrar import views\n\nfrom registrar.views.admin_views import ExportData\n\n\nfrom registrar.views.application import Step\nfrom registrar.views.utility import always_404\nfrom api.views import available, get_current_federal, get_current_full\n\n\nAPPLICATION_NAMESPACE = views.ApplicationWizard.URL_NAMESPACE\napplication_urls = [\n path(\"\", views.ApplicationWizard.as_view(), name=\"\"),\n path(\"finished/\", views.Finished.as_view(), name=\"finished\"),\n]\n\n# dynamically generate the other application_urls\nfor step, view in [\n # add/remove steps here\n (Step.ORGANIZATION_TYPE, views.OrganizationType),\n (Step.TRIBAL_GOVERNMENT, views.TribalGovernment),\n (Step.ORGANIZATION_FEDERAL, views.OrganizationFederal),\n (Step.ORGANIZATION_ELECTION, views.OrganizationElection),\n (Step.ORGANIZATION_CONTACT, views.OrganizationContact),\n (Step.ABOUT_YOUR_ORGANIZATION, views.AboutYourOrganization),\n (Step.AUTHORIZING_OFFICIAL, views.AuthorizingOfficial),\n (Step.CURRENT_SITES, views.CurrentSites),\n (Step.DOTGOV_DOMAIN, views.DotgovDomain),\n (Step.PURPOSE, views.Purpose),\n (Step.YOUR_CONTACT, views.YourContact),\n (Step.OTHER_CONTACTS, views.OtherContacts),\n (Step.ANYTHING_ELSE, views.AnythingElse),\n (Step.REQUIREMENTS, views.Requirements),\n (Step.REVIEW, views.Review),\n]:\n application_urls.append(path(f\"{step}/\", view.as_view(), name=step))\n\n\nurlpatterns = [\n path(\"\", views.index, name=\"home\"),\n path(\n \"admin/logout/\",\n RedirectView.as_view(pattern_name=\"logout\", permanent=False),\n ),\n path(\"export_data/\", ExportData.as_view(), name=\"admin_export_data\"),\n path(\"admin/\", admin.site.urls),\n path(\n \"application/<id>/edit/\",\n views.ApplicationWizard.as_view(),\n name=views.ApplicationWizard.EDIT_URL_NAME,\n ),\n path(\n \"application/<int:pk>\",\n views.ApplicationStatus.as_view(),\n name=\"application-status\",\n ),\n path(\n \"application/<int:pk>/withdraw\",\n views.ApplicationWithdrawConfirmation.as_view(),\n name=\"application-withdraw-confirmation\",\n ),\n path(\n \"application/<int:pk>/withdrawconfirmed\",\n views.ApplicationWithdrawn.as_view(),\n name=\"application-withdrawn\",\n ),\n path(\"health/\", views.health),\n path(\"openid/\", include(\"djangooidc.urls\")),\n path(\"request/\", include((application_urls, APPLICATION_NAMESPACE))),\n path(\"api/v1/available/\", available, name=\"available\"),\n path(\"api/v1/get-report/current-federal\", get_current_federal, name=\"get-current-federal\"),\n path(\"api/v1/get-report/current-full\", get_current_full, name=\"get-current-full\"),\n path(\n \"todo\",\n lambda r: always_404(r, \"We forgot to include this link, sorry.\"),\n name=\"todo\",\n ),\n path(\"domain/<int:pk>\", views.DomainView.as_view(), name=\"domain\"),\n path(\"domain/<int:pk>/users\", 
views.DomainUsersView.as_view(), name=\"domain-users\"),\n path(\n \"domain/<int:pk>/dns\",\n views.DomainDNSView.as_view(),\n name=\"domain-dns\",\n ),\n path(\n \"domain/<int:pk>/dns/nameservers\",\n views.DomainNameserversView.as_view(),\n name=\"domain-dns-nameservers\",\n ),\n path(\n \"domain/<int:pk>/dns/dnssec\",\n views.DomainDNSSECView.as_view(),\n name=\"domain-dns-dnssec\",\n ),\n path(\n \"domain/<int:pk>/dns/dnssec/dsdata\",\n views.DomainDsDataView.as_view(),\n name=\"domain-dns-dnssec-dsdata\",\n ),\n path(\n \"domain/<int:pk>/your-contact-information\",\n views.DomainYourContactInformationView.as_view(),\n name=\"domain-your-contact-information\",\n ),\n path(\n \"domain/<int:pk>/org-name-address\",\n views.DomainOrgNameAddressView.as_view(),\n name=\"domain-org-name-address\",\n ),\n path(\n \"domain/<int:pk>/authorizing-official\",\n views.DomainAuthorizingOfficialView.as_view(),\n name=\"domain-authorizing-official\",\n ),\n path(\n \"domain/<int:pk>/security-email\",\n views.DomainSecurityEmailView.as_view(),\n name=\"domain-security-email\",\n ),\n path(\n \"domain/<int:pk>/users/add\",\n views.DomainAddUserView.as_view(),\n name=\"domain-users-add\",\n ),\n path(\n \"invitation/<int:pk>/delete\",\n views.DomainInvitationDeleteView.as_view(http_method_names=[\"post\"]),\n name=\"invitation-delete\",\n ),\n path(\n \"application/<int:pk>/delete\",\n views.DomainApplicationDeleteView.as_view(http_method_names=[\"post\"]),\n name=\"application-delete\",\n ),\n path(\n \"domain/<int:pk>/users/<int:user_pk>/delete\",\n views.DomainDeleteUserView.as_view(http_method_names=[\"post\"]),\n name=\"domain-user-delete\",\n ),\n]\n\n# we normally would guard these with `if settings.DEBUG` but tests run with\n# DEBUG = False even when these apps have been loaded because settings.DEBUG\n# was actually True. Instead, let's add these URLs any time we are able to\n# import the debug toolbar package.\ntry:\n import debug_toolbar # type: ignore\n\n urlpatterns += [path(\"__debug__/\", include(debug_toolbar.urls))]\nexcept ImportError:\n pass\n", "path": "src/registrar/config/urls.py"}]} | 3,913 | 236 |
gh_patches_debug_29156 | rasdani/github-patches | git_diff | pypi__warehouse-666 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fail gracefully if Redis isn't Up
Right now, if the Redis instance backing the statistics goes down, we start throwing errors when rendering the page, which is obviously less than desirable. Ideally we want to fail gracefully here, so that if Redis goes down we simply stop rendering statistics until it comes back. A tricky part is that we'll want to use ESI so that we don't cache the failure output in place of the statistics for a long time. This makes sense anyway, since we want our stats to be updated more quickly than the bulk of the page. Additionally, we should ensure that we log the error regardless of whether we raise an exception or not.
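To make the idea concrete, here is a minimal sketch (not the actual fix) of what "fail gracefully and log" could look like around the Redis-backed stats service used in `warehouse/packaging/views.py` below; the helper name and the broad exception handling are illustrative assumptions:

```python
# Sketch only: degrade to "no statistics" when Redis is unavailable,
# and always log the failure so it is still visible to operators.
import logging

log = logging.getLogger(__name__)


def safe_download_stats(stats_svc, project_name):
    try:
        return {
            "daily": stats_svc.get_daily_stats(project_name),
            "weekly": stats_svc.get_weekly_stats(project_name),
            "monthly": stats_svc.get_monthly_stats(project_name),
        }
    except Exception:  # e.g. a redis connection error
        log.exception("Unable to load download statistics")
        return None  # the template can simply skip the stats section
```

Served from its own short-lived ESI fragment, a `None` result would only be cached briefly, so normal statistics come back as soon as Redis does.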
</issue>
<code>
[start of warehouse/routes.py]
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13
14 def includeme(config):
15 # Basic global routes
16 config.add_route("index", "/", read_only=True)
17 config.add_route("robots.txt", "/robots.txt", read_only=True)
18 config.add_route("index.sitemap.xml", "/sitemap.xml", read_only=True)
19 config.add_route(
20 "bucket.sitemap.xml",
21 "/{bucket}.sitemap.xml",
22 read_only=True,
23 )
24
25 # ESI Routes
26 config.add_route(
27 "esi.current-user-indicator",
28 "/_esi/current-user-indicator/",
29 read_only=True,
30 )
31
32 # Accounts
33 config.add_route(
34 "accounts.profile",
35 "/user/{username}/",
36 factory="warehouse.accounts.models:UserFactory",
37 traverse="/{username}",
38 read_only=True,
39 )
40 config.add_route("accounts.login", "/account/login/")
41 config.add_route("accounts.logout", "/account/logout/")
42
43 # Packaging
44 config.add_route(
45 "packaging.project",
46 "/project/{name}/",
47 factory="warehouse.packaging.models:ProjectFactory",
48 traverse="/{name}",
49 read_only=True,
50 )
51 config.add_route(
52 "packaging.release",
53 "/project/{name}/{version}/",
54 factory="warehouse.packaging.models:ProjectFactory",
55 traverse="/{name}/{version}",
56 read_only=True,
57 )
58 config.add_route("packaging.file", "/packages/{path:.*}", read_only=True)
59
60 # Legacy URLs
61 config.add_route("legacy.api.simple.index", "/simple/", read_only=True)
62 config.add_route(
63 "legacy.api.simple.detail",
64 "/simple/{name}/",
65 factory="warehouse.packaging.models:ProjectFactory",
66 traverse="/{name}/",
67 read_only=True,
68 )
69 config.add_route(
70 "legacy.api.json.project",
71 "/pypi/{name}/json",
72 factory="warehouse.packaging.models:ProjectFactory",
73 traverse="/{name}",
74 read_only=True,
75 )
76 config.add_route(
77 "legacy.api.json.release",
78 "/pypi/{name}/{version}/json",
79 factory="warehouse.packaging.models:ProjectFactory",
80 traverse="/{name}/{version}",
81 read_only=True,
82 )
83
84 # Legacy Action URLs
85 config.add_pypi_action_route("legacy.api.pypi.file_upload", "file_upload")
86 config.add_pypi_action_route("legacy.api.pypi.submit", "submit")
87 config.add_pypi_action_route(
88 "legacy.api.pypi.submit_pkg_info",
89 "submit_pkg_info",
90 )
91 config.add_pypi_action_route("legacy.api.pypi.doc_upload", "doc_upload")
92 config.add_pypi_action_route("legacy.api.pypi.doap", "doap")
93
94 # Legacy XMLRPC
95 config.add_xmlrpc_endpoint(
96 "pypi",
97 pattern="/pypi",
98 header="Content-Type:text/xml",
99 read_only=True,
100 )
101
102 # Legacy Documentation
103 config.add_route("legacy.docs", config.registry.settings["docs.url"])
104
105 # Legacy Redirects
106 config.add_redirect("/pypi/{name}/", "/project/{name}/")
107 config.add_redirect(
108 "/pypi/{name}/{version}/",
109 "/project/{name}/{version}/",
110 )
111
[end of warehouse/routes.py]
[start of warehouse/packaging/views.py]
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 from pyramid.httpexceptions import HTTPMovedPermanently, HTTPNotFound
14 from pyramid.response import FileIter, Response
15 from pyramid.view import view_config
16 from sqlalchemy.orm import joinedload
17 from sqlalchemy.orm.exc import NoResultFound
18
19 from warehouse.accounts.models import User
20 from warehouse.cache.http import cache_control
21 from warehouse.cache.origin import origin_cache
22 from warehouse.packaging.interfaces import IDownloadStatService, IFileStorage
23 from warehouse.packaging.models import Release, File, Role
24
25
26 @view_config(
27 route_name="packaging.project",
28 renderer="packaging/detail.html",
29 decorator=[
30 origin_cache(
31 1 * 24 * 60 * 60, # 1 day
32 stale_while_revalidate=1 * 24 * 60 * 60, # 1 day
33 stale_if_error=5 * 24 * 60 * 60, # 5 days
34 ),
35 ],
36 )
37 def project_detail(project, request):
38 if project.name != request.matchdict.get("name", project.name):
39 return HTTPMovedPermanently(
40 request.current_route_path(name=project.name),
41 )
42
43 try:
44 release = project.releases.order_by(
45 Release._pypi_ordering.desc()
46 ).limit(1).one()
47 except NoResultFound:
48 return HTTPNotFound()
49
50 return release_detail(release, request)
51
52
53 @view_config(
54 route_name="packaging.release",
55 renderer="packaging/detail.html",
56 decorator=[
57 origin_cache(
58 1 * 24 * 60 * 60, # 1 day
59 stale_while_revalidate=1 * 24 * 60 * 60, # 1 day
60 stale_if_error=5 * 24 * 60 * 60, # 5 days
61 ),
62 ],
63 )
64 def release_detail(release, request):
65 project = release.project
66
67 if project.name != request.matchdict.get("name", project.name):
68 return HTTPMovedPermanently(
69 request.current_route_path(name=project.name),
70 )
71
72 # Get all of the registered versions for this Project, in order of newest
73 # to oldest.
74 all_releases = (
75 project.releases
76 .with_entities(Release.version, Release.created)
77 .order_by(Release._pypi_ordering.desc())
78 .all()
79 )
80
81 # Get all of the maintainers for this project.
82 maintainers = [
83 r.user
84 for r in (
85 request.db.query(Role)
86 .join(User)
87 .filter(Role.project == project)
88 .distinct(User.username)
89 .order_by(User.username)
90 .all()
91 )
92 ]
93
94 stats_svc = request.find_service(IDownloadStatService)
95
96 return {
97 "project": project,
98 "release": release,
99 "files": release.files.all(),
100 "all_releases": all_releases,
101 "maintainers": maintainers,
102 "download_stats": {
103 "daily": stats_svc.get_daily_stats(project.name),
104 "weekly": stats_svc.get_weekly_stats(project.name),
105 "monthly": stats_svc.get_monthly_stats(project.name),
106 },
107 }
108
109
110 @view_config(
111 route_name="packaging.file",
112 decorator=[
113 cache_control(365 * 24 * 60 * 60), # 1 year
114 origin_cache(
115 365 * 24 * 60 * 60, # 1 year
116 stale_while_revalidate=1 * 24 * 60 * 60, # 1 day
117 stale_if_error=5 * 24 * 60 * 60, # 5 days
118 ),
119 ],
120 )
121 def packages(request):
122 # The amount of logic that we can do in this view is very limited, this
123 # view needs to be able to be handled by Fastly directly hitting S3 instead
124 # of actually hitting this view. This more or less means that we're limited
125 # to just serving the actual file.
126
127 # Grab the path of the file that we're attempting to serve
128 path = request.matchdict["path"]
129
130 # We need to look up the File that is associated with this path, either the
131 # package path or the pgp path. If that doesn't exist then we'll bail out
132 # early with a 404.
133 try:
134 file_ = (
135 request.db.query(File)
136 .options(joinedload(File.release)
137 .joinedload(Release.project))
138 .filter((File.path == path) | (File.pgp_path == path))
139 .one()
140 )
141 except NoResultFound:
142 return HTTPNotFound()
143
144 # If this request is for a PGP signature, and the file doesn't have a PGP
145 # signature, then we can go ahead and 404 now before hitting the file
146 # storage.
147 if path == file_.pgp_path and not file_.has_signature:
148 return HTTPNotFound()
149
150 # Try to get the file from the file file storage service, logging an error
151 # and returning a HTTPNotFound if one can't be found.
152 try:
153 f = request.find_service(IFileStorage).get(path)
154 except FileNotFoundError:
155 request.log.error("missing file data", path=path)
156 return HTTPNotFound()
157
158 # If the path we're accessing is the path for the package itself, as
159 # opposed to the path for the signature, then we can include a
160 # Content-Length header.
161 content_length = None
162 if path == file_.path:
163 content_length = file_.size
164
165 return Response(
166 # If we have a wsgi.file_wrapper, we'll want to use that so that, if
167 # possible, this will use an optimized method of sending. Otherwise
168 # we'll just use Pyramid's FileIter as a fallback.
169 app_iter=request.environ.get("wsgi.file_wrapper", FileIter)(f),
170 # We use application/octet-stream instead of something nicer because
171 # different HTTP libraries will treat different combinations of
172 # Content-Type and Content-Encoding differently. The only thing that
173 # works sanely across all things without having something in the middle
174 # decide it can decompress the result to "help" the end user is with
175 # Content-Type: applicaton/octet-stream and no Content-Encoding.
176 content_type="application/octet-stream",
177 content_encoding=None,
178 # We need to specify an ETag for this response. Since ETags compared
179 # between URLs have no meaning (and thus, is safe for two URLs to share
180 # the same ETag) we will just use the MD5 hash of the package as our
181 # ETag.
182 etag=file_.md5_digest,
183 # Similarly to the ETag header, we'll just use the date that the file
184 # was uploaded as the Last-Modified header.
185 last_modified=file_.upload_time,
186 # If we have a Content-Length, we'll go ahead and use it here to
187 # hopefully enable the server and clients alike to be smarter about how
188 # they handle downloading this response.
189 content_length=content_length,
190 )
191
[end of warehouse/packaging/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/warehouse/packaging/views.py b/warehouse/packaging/views.py
--- a/warehouse/packaging/views.py
+++ b/warehouse/packaging/views.py
@@ -91,19 +91,38 @@
)
]
- stats_svc = request.find_service(IDownloadStatService)
-
return {
"project": project,
"release": release,
"files": release.files.all(),
"all_releases": all_releases,
"maintainers": maintainers,
- "download_stats": {
- "daily": stats_svc.get_daily_stats(project.name),
- "weekly": stats_svc.get_weekly_stats(project.name),
- "monthly": stats_svc.get_monthly_stats(project.name),
- },
+ }
+
+
+@view_config(
+ route_name="esi.project-stats",
+ renderer="packaging/includes/project-stats.html",
+ decorator=[
+ origin_cache(
+ 15 * 60, # 15 Minutes
+ stale_while_revalidate=30 * 60, # 30 minutes
+ stale_if_error=30 * 60, # 30 minutes
+ ),
+ ],
+)
+def project_stats(project, request):
+ if project.name != request.matchdict.get("name", project.name):
+ return HTTPMovedPermanently(
+ request.current_route_path(name=project.name),
+ )
+
+ stats_svc = request.find_service(IDownloadStatService)
+
+ return {
+ "daily": stats_svc.get_daily_stats(project.name),
+ "weekly": stats_svc.get_weekly_stats(project.name),
+ "monthly": stats_svc.get_monthly_stats(project.name),
}
diff --git a/warehouse/routes.py b/warehouse/routes.py
--- a/warehouse/routes.py
+++ b/warehouse/routes.py
@@ -28,6 +28,13 @@
"/_esi/current-user-indicator/",
read_only=True,
)
+ config.add_route(
+ "esi.project-stats",
+ "/_esi/project-stats/{name}/",
+ factory="warehouse.packaging.models:ProjectFactory",
+ traverse="/{name}",
+ read_only=True,
+ )
# Accounts
config.add_route(
| {"golden_diff": "diff --git a/warehouse/packaging/views.py b/warehouse/packaging/views.py\n--- a/warehouse/packaging/views.py\n+++ b/warehouse/packaging/views.py\n@@ -91,19 +91,38 @@\n )\n ]\n \n- stats_svc = request.find_service(IDownloadStatService)\n-\n return {\n \"project\": project,\n \"release\": release,\n \"files\": release.files.all(),\n \"all_releases\": all_releases,\n \"maintainers\": maintainers,\n- \"download_stats\": {\n- \"daily\": stats_svc.get_daily_stats(project.name),\n- \"weekly\": stats_svc.get_weekly_stats(project.name),\n- \"monthly\": stats_svc.get_monthly_stats(project.name),\n- },\n+ }\n+\n+\n+@view_config(\n+ route_name=\"esi.project-stats\",\n+ renderer=\"packaging/includes/project-stats.html\",\n+ decorator=[\n+ origin_cache(\n+ 15 * 60, # 15 Minutes\n+ stale_while_revalidate=30 * 60, # 30 minutes\n+ stale_if_error=30 * 60, # 30 minutes\n+ ),\n+ ],\n+)\n+def project_stats(project, request):\n+ if project.name != request.matchdict.get(\"name\", project.name):\n+ return HTTPMovedPermanently(\n+ request.current_route_path(name=project.name),\n+ )\n+\n+ stats_svc = request.find_service(IDownloadStatService)\n+\n+ return {\n+ \"daily\": stats_svc.get_daily_stats(project.name),\n+ \"weekly\": stats_svc.get_weekly_stats(project.name),\n+ \"monthly\": stats_svc.get_monthly_stats(project.name),\n }\n \n \ndiff --git a/warehouse/routes.py b/warehouse/routes.py\n--- a/warehouse/routes.py\n+++ b/warehouse/routes.py\n@@ -28,6 +28,13 @@\n \"/_esi/current-user-indicator/\",\n read_only=True,\n )\n+ config.add_route(\n+ \"esi.project-stats\",\n+ \"/_esi/project-stats/{name}/\",\n+ factory=\"warehouse.packaging.models:ProjectFactory\",\n+ traverse=\"/{name}\",\n+ read_only=True,\n+ )\n \n # Accounts\n config.add_route(\n", "issue": "Fail gracefully if Redis isn't Up\nRight now if the redis for the statistics go down, we'll start throwing errors when rendering the page, which is obviously less than desirable. Ideally we'll want to fail gracefully for this so that if redis goes down we'll just stop rendering statistics until it comes back. A tricky thing here is that we'll want to use ESI so that we don't cache for a long time the statistics with the failure output instead of statistics. This makes sense anyways since we'll want our stats to be updated quicker than the bulk of the page anyways. 
Additionally, we should ensure that we log the error regardless of whether we raise an exception or not.\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ndef includeme(config):\n # Basic global routes\n config.add_route(\"index\", \"/\", read_only=True)\n config.add_route(\"robots.txt\", \"/robots.txt\", read_only=True)\n config.add_route(\"index.sitemap.xml\", \"/sitemap.xml\", read_only=True)\n config.add_route(\n \"bucket.sitemap.xml\",\n \"/{bucket}.sitemap.xml\",\n read_only=True,\n )\n\n # ESI Routes\n config.add_route(\n \"esi.current-user-indicator\",\n \"/_esi/current-user-indicator/\",\n read_only=True,\n )\n\n # Accounts\n config.add_route(\n \"accounts.profile\",\n \"/user/{username}/\",\n factory=\"warehouse.accounts.models:UserFactory\",\n traverse=\"/{username}\",\n read_only=True,\n )\n config.add_route(\"accounts.login\", \"/account/login/\")\n config.add_route(\"accounts.logout\", \"/account/logout/\")\n\n # Packaging\n config.add_route(\n \"packaging.project\",\n \"/project/{name}/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}\",\n read_only=True,\n )\n config.add_route(\n \"packaging.release\",\n \"/project/{name}/{version}/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}/{version}\",\n read_only=True,\n )\n config.add_route(\"packaging.file\", \"/packages/{path:.*}\", read_only=True)\n\n # Legacy URLs\n config.add_route(\"legacy.api.simple.index\", \"/simple/\", read_only=True)\n config.add_route(\n \"legacy.api.simple.detail\",\n \"/simple/{name}/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}/\",\n read_only=True,\n )\n config.add_route(\n \"legacy.api.json.project\",\n \"/pypi/{name}/json\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}\",\n read_only=True,\n )\n config.add_route(\n \"legacy.api.json.release\",\n \"/pypi/{name}/{version}/json\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}/{version}\",\n read_only=True,\n )\n\n # Legacy Action URLs\n config.add_pypi_action_route(\"legacy.api.pypi.file_upload\", \"file_upload\")\n config.add_pypi_action_route(\"legacy.api.pypi.submit\", \"submit\")\n config.add_pypi_action_route(\n \"legacy.api.pypi.submit_pkg_info\",\n \"submit_pkg_info\",\n )\n config.add_pypi_action_route(\"legacy.api.pypi.doc_upload\", \"doc_upload\")\n config.add_pypi_action_route(\"legacy.api.pypi.doap\", \"doap\")\n\n # Legacy XMLRPC\n config.add_xmlrpc_endpoint(\n \"pypi\",\n pattern=\"/pypi\",\n header=\"Content-Type:text/xml\",\n read_only=True,\n )\n\n # Legacy Documentation\n config.add_route(\"legacy.docs\", config.registry.settings[\"docs.url\"])\n\n # Legacy Redirects\n config.add_redirect(\"/pypi/{name}/\", \"/project/{name}/\")\n config.add_redirect(\n \"/pypi/{name}/{version}/\",\n \"/project/{name}/{version}/\",\n )\n", "path": "warehouse/routes.py"}, {"content": "# Licensed under the Apache License, Version 2.0 (the 
\"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom pyramid.httpexceptions import HTTPMovedPermanently, HTTPNotFound\nfrom pyramid.response import FileIter, Response\nfrom pyramid.view import view_config\nfrom sqlalchemy.orm import joinedload\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom warehouse.accounts.models import User\nfrom warehouse.cache.http import cache_control\nfrom warehouse.cache.origin import origin_cache\nfrom warehouse.packaging.interfaces import IDownloadStatService, IFileStorage\nfrom warehouse.packaging.models import Release, File, Role\n\n\n@view_config(\n route_name=\"packaging.project\",\n renderer=\"packaging/detail.html\",\n decorator=[\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=1 * 24 * 60 * 60, # 1 day\n stale_if_error=5 * 24 * 60 * 60, # 5 days\n ),\n ],\n)\ndef project_detail(project, request):\n if project.name != request.matchdict.get(\"name\", project.name):\n return HTTPMovedPermanently(\n request.current_route_path(name=project.name),\n )\n\n try:\n release = project.releases.order_by(\n Release._pypi_ordering.desc()\n ).limit(1).one()\n except NoResultFound:\n return HTTPNotFound()\n\n return release_detail(release, request)\n\n\n@view_config(\n route_name=\"packaging.release\",\n renderer=\"packaging/detail.html\",\n decorator=[\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=1 * 24 * 60 * 60, # 1 day\n stale_if_error=5 * 24 * 60 * 60, # 5 days\n ),\n ],\n)\ndef release_detail(release, request):\n project = release.project\n\n if project.name != request.matchdict.get(\"name\", project.name):\n return HTTPMovedPermanently(\n request.current_route_path(name=project.name),\n )\n\n # Get all of the registered versions for this Project, in order of newest\n # to oldest.\n all_releases = (\n project.releases\n .with_entities(Release.version, Release.created)\n .order_by(Release._pypi_ordering.desc())\n .all()\n )\n\n # Get all of the maintainers for this project.\n maintainers = [\n r.user\n for r in (\n request.db.query(Role)\n .join(User)\n .filter(Role.project == project)\n .distinct(User.username)\n .order_by(User.username)\n .all()\n )\n ]\n\n stats_svc = request.find_service(IDownloadStatService)\n\n return {\n \"project\": project,\n \"release\": release,\n \"files\": release.files.all(),\n \"all_releases\": all_releases,\n \"maintainers\": maintainers,\n \"download_stats\": {\n \"daily\": stats_svc.get_daily_stats(project.name),\n \"weekly\": stats_svc.get_weekly_stats(project.name),\n \"monthly\": stats_svc.get_monthly_stats(project.name),\n },\n }\n\n\n@view_config(\n route_name=\"packaging.file\",\n decorator=[\n cache_control(365 * 24 * 60 * 60), # 1 year\n origin_cache(\n 365 * 24 * 60 * 60, # 1 year\n stale_while_revalidate=1 * 24 * 60 * 60, # 1 day\n stale_if_error=5 * 24 * 60 * 60, # 5 days\n ),\n ],\n)\ndef packages(request):\n # The amount of logic that we can do in this view is very limited, this\n # view needs to be able to be handled by Fastly directly hitting S3 instead\n # of actually hitting this view. 
This more or less means that we're limited\n # to just serving the actual file.\n\n # Grab the path of the file that we're attempting to serve\n path = request.matchdict[\"path\"]\n\n # We need to look up the File that is associated with this path, either the\n # package path or the pgp path. If that doesn't exist then we'll bail out\n # early with a 404.\n try:\n file_ = (\n request.db.query(File)\n .options(joinedload(File.release)\n .joinedload(Release.project))\n .filter((File.path == path) | (File.pgp_path == path))\n .one()\n )\n except NoResultFound:\n return HTTPNotFound()\n\n # If this request is for a PGP signature, and the file doesn't have a PGP\n # signature, then we can go ahead and 404 now before hitting the file\n # storage.\n if path == file_.pgp_path and not file_.has_signature:\n return HTTPNotFound()\n\n # Try to get the file from the file file storage service, logging an error\n # and returning a HTTPNotFound if one can't be found.\n try:\n f = request.find_service(IFileStorage).get(path)\n except FileNotFoundError:\n request.log.error(\"missing file data\", path=path)\n return HTTPNotFound()\n\n # If the path we're accessing is the path for the package itself, as\n # opposed to the path for the signature, then we can include a\n # Content-Length header.\n content_length = None\n if path == file_.path:\n content_length = file_.size\n\n return Response(\n # If we have a wsgi.file_wrapper, we'll want to use that so that, if\n # possible, this will use an optimized method of sending. Otherwise\n # we'll just use Pyramid's FileIter as a fallback.\n app_iter=request.environ.get(\"wsgi.file_wrapper\", FileIter)(f),\n # We use application/octet-stream instead of something nicer because\n # different HTTP libraries will treat different combinations of\n # Content-Type and Content-Encoding differently. The only thing that\n # works sanely across all things without having something in the middle\n # decide it can decompress the result to \"help\" the end user is with\n # Content-Type: applicaton/octet-stream and no Content-Encoding.\n content_type=\"application/octet-stream\",\n content_encoding=None,\n # We need to specify an ETag for this response. Since ETags compared\n # between URLs have no meaning (and thus, is safe for two URLs to share\n # the same ETag) we will just use the MD5 hash of the package as our\n # ETag.\n etag=file_.md5_digest,\n # Similarly to the ETag header, we'll just use the date that the file\n # was uploaded as the Last-Modified header.\n last_modified=file_.upload_time,\n # If we have a Content-Length, we'll go ahead and use it here to\n # hopefully enable the server and clients alike to be smarter about how\n # they handle downloading this response.\n content_length=content_length,\n )\n", "path": "warehouse/packaging/views.py"}]} | 3,857 | 499 |
gh_patches_debug_28915 | rasdani/github-patches | git_diff | ckan__ckan-6797 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support for pdb and debuggers
### Proposed fixes:
It is now possible to debug CKAN with pdb/ipdb/the PyCharm debugger and others, both outside and inside Docker.

I just exposed a `werkzeug` option to the CKAN CLI, called `passthrough_errors`. Enabling it, together with `--disable-reloader` (which should be the default in my opinion, as it was in the past), allows running pdb without making any other changes to the source code.
`threads` should not be enabled and `processes` should be set to 1. These are the defaults already.
> passthrough_errors (bool) – set this to True to disable the error catching. This means that the server will die on errors but it can be useful to hook debuggers in (pdb etc.)
-- https://werkzeug.palletsprojects.com/en/2.0.x/serving/
Example:
```
$ cd contrib/docker
$ docker-compose up --build -d
$ # wait...
$ docker-compose exec ckan bash
root@f6a71d0b7686:/# python3 -m pdb /usr/lib/ckan/venv/bin/ckan -c /etc/ckan/production.ini run --host 0.0.0.0 -E --disable-reloader
> /usr/lib/ckan/venv/bin/ckan(3)<module>()
-> import re
(Pdb) b ckan/views/api.py:215
Breakpoint 1 at /usr/lib/ckan/venv/src/ckan/ckan/views/api.py:215
(Pdb) c
2021-11-01 17:00:50,832 INFO [ckan.cli] Using configuration file /etc/ckan/production.ini
2021-11-01 17:00:50,832 INFO [ckan.config.environment] Loading static files from public
2021-11-01 17:00:50,954 INFO [ckan.config.environment] Loading templates from /usr/lib/ckan/venv/src/ckan/ckan/templates
2021-11-01 17:00:51,552 INFO [ckan.config.environment] Loading templates from /usr/lib/ckan/venv/src/ckan/ckan/templates
2021-11-01 17:00:52,173 INFO [ckan.cli.server] Running CKAN on http://0.0.0.0:5000
2021-11-01 17:00:52,174 WARNI [werkzeug] * Running on all addresses.
WARNING: This is a development server. Do not use it in a production deployment.
```
...then `http://localhost:5000/api/3/action/package_search` can be opened in the web browser to trigger the breakpoint:
```
> /usr/lib/ckan/venv/src/ckan/ckan/views/api.py(215)action()
-> try:
(Pdb)
```
### Features:
- [ ] includes tests covering changes
- [x] includes updated documentation
- [X] includes user-visible changes
- [ ] includes API changes
- [ ] includes bugfix for possible backport
</issue>
<code>
[start of ckan/cli/server.py]
1 # encoding: utf-8
2
3 import logging
4
5 import click
6 from werkzeug.serving import run_simple
7
8 import ckan.plugins.toolkit as tk
9 from ckan.common import config
10
11 log = logging.getLogger(__name__)
12
13 DEFAULT_HOST = u"localhost"
14 DEFAULT_PORT = 5000
15
16
17 @click.command(u"run", short_help=u"Start development server")
18 @click.option(u"-H", u"--host", help=u"Host name")
19 @click.option(u"-p", u"--port", help=u"Port number")
20 @click.option(u"-r", u"--disable-reloader", is_flag=True,
21 help=u"Disable reloader")
22 @click.option(
23 u"-t", u"--threaded", is_flag=True,
24 help=u"Handle each request in a separate thread"
25 )
26 @click.option(
27 u"--processes", type=int, default=0,
28 help=u"Maximum number of concurrent processes"
29 )
30 @click.option(
31 u"-e", u"--extra-files", multiple=True,
32 help=u"Additional files that should be watched for server reloading"
33 " (you can provide multiple values)")
34 @click.option(
35 u"-C", u"--ssl-cert", default=None,
36 help=u"Certificate file to use to enable SSL. Passing 'adhoc' will "
37 " automatically generate a new one (on each server reload).")
38 @click.option(
39 u"-K", u"--ssl-key", default=None,
40 help=u"Key file to use to enable SSL. Passing 'adhoc' will "
41 " automatically generate a new one (on each server reload).")
42 @click.pass_context
43 def run(ctx, host, port, disable_reloader, threaded, extra_files, processes,
44 ssl_cert, ssl_key):
45 u"""Runs the Werkzeug development server"""
46
47 # Reloading
48 use_reloader = not disable_reloader
49 config_extra_files = tk.aslist(
50 config.get(u"ckan.devserver.watch_patterns")
51 )
52 extra_files = list(extra_files) + [
53 config[u"__file__"]
54 ] + config_extra_files
55
56 # Threads and processes
57 threaded = threaded or tk.asbool(config.get(u"ckan.devserver.threaded"))
58 processes = processes or tk.asint(
59 config.get(u"ckan.devserver.multiprocess", 1)
60 )
61 if threaded and processes > 1:
62 tk.error_shout(u"Cannot have a multithreaded and multi process server")
63 raise click.Abort()
64
65 # SSL
66 cert_file = ssl_cert or config.get(u"ckan.devserver.ssl_cert")
67 key_file = ssl_key or config.get(u"ckan.devserver.ssl_key")
68
69 if cert_file and key_file:
70 if cert_file == key_file == u"adhoc":
71 ssl_context = u"adhoc"
72 else:
73 ssl_context = (ssl_cert, ssl_key)
74 else:
75 ssl_context = None
76
77 host = host or config.get(u'ckan.devserver.host', DEFAULT_HOST)
78 port = port or config.get(u'ckan.devserver.port', DEFAULT_PORT)
79 try:
80 port = int(port)
81 except ValueError:
82 tk.error_shout(u"Server port must be an integer, not {}".format(port))
83 raise click.Abort()
84
85 log.info(u"Running CKAN on {scheme}://{host}:{port}".format(
86 scheme=u"https" if ssl_context else u"http", host=host, port=port))
87
88 run_simple(
89 host,
90 port,
91 ctx.obj.app,
92 use_reloader=use_reloader,
93 use_evalex=True,
94 threaded=threaded,
95 processes=processes,
96 extra_files=extra_files,
97 ssl_context=ssl_context,
98 )
99
[end of ckan/cli/server.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ckan/cli/server.py b/ckan/cli/server.py
--- a/ckan/cli/server.py
+++ b/ckan/cli/server.py
@@ -19,6 +19,8 @@
@click.option(u"-p", u"--port", help=u"Port number")
@click.option(u"-r", u"--disable-reloader", is_flag=True,
help=u"Disable reloader")
+@click.option(u"-E", u"--passthrough-errors", is_flag=True,
+ help=u"Disable error caching (useful to hook debuggers)")
@click.option(
u"-t", u"--threaded", is_flag=True,
help=u"Handle each request in a separate thread"
@@ -40,10 +42,16 @@
help=u"Key file to use to enable SSL. Passing 'adhoc' will "
" automatically generate a new one (on each server reload).")
@click.pass_context
-def run(ctx, host, port, disable_reloader, threaded, extra_files, processes,
- ssl_cert, ssl_key):
+def run(ctx, host, port, disable_reloader, passthrough_errors, threaded,
+ extra_files, processes, ssl_cert, ssl_key):
u"""Runs the Werkzeug development server"""
+ # passthrough_errors overrides conflicting options
+ if passthrough_errors:
+ disable_reloader = True
+ threaded = False
+ processes = 1
+
# Reloading
use_reloader = not disable_reloader
config_extra_files = tk.aslist(
@@ -95,4 +103,5 @@
processes=processes,
extra_files=extra_files,
ssl_context=ssl_context,
+ passthrough_errors=passthrough_errors,
)
| {"golden_diff": "diff --git a/ckan/cli/server.py b/ckan/cli/server.py\n--- a/ckan/cli/server.py\n+++ b/ckan/cli/server.py\n@@ -19,6 +19,8 @@\n @click.option(u\"-p\", u\"--port\", help=u\"Port number\")\n @click.option(u\"-r\", u\"--disable-reloader\", is_flag=True,\n help=u\"Disable reloader\")\[email protected](u\"-E\", u\"--passthrough-errors\", is_flag=True,\n+ help=u\"Disable error caching (useful to hook debuggers)\")\n @click.option(\n u\"-t\", u\"--threaded\", is_flag=True,\n help=u\"Handle each request in a separate thread\"\n@@ -40,10 +42,16 @@\n help=u\"Key file to use to enable SSL. Passing 'adhoc' will \"\n \" automatically generate a new one (on each server reload).\")\n @click.pass_context\n-def run(ctx, host, port, disable_reloader, threaded, extra_files, processes,\n- ssl_cert, ssl_key):\n+def run(ctx, host, port, disable_reloader, passthrough_errors, threaded,\n+ extra_files, processes, ssl_cert, ssl_key):\n u\"\"\"Runs the Werkzeug development server\"\"\"\n \n+ # passthrough_errors overrides conflicting options\n+ if passthrough_errors:\n+ disable_reloader = True\n+ threaded = False\n+ processes = 1\n+\n # Reloading\n use_reloader = not disable_reloader\n config_extra_files = tk.aslist(\n@@ -95,4 +103,5 @@\n processes=processes,\n extra_files=extra_files,\n ssl_context=ssl_context,\n+ passthrough_errors=passthrough_errors,\n )\n", "issue": "Support for pdb and debuggers\n### Proposed fixes:\r\n\r\nIt is now possible to debug ckan with pdb/ipdb/PyCharm debugger and others, both outside Docker and inside Docker.\r\nI just exposed a `werkzeug` option to the CKAN CLI, called `passthrough_errors`. Enabling that, together with `--disable-reloader` (which should be the default in my opinion, like it was in the past), allow to run pdb without making other changes to the source code.\r\n`threads` should not be enabled and `processes` should be set to 1. These are the defaults already.\r\n\r\n> passthrough_errors (bool) \u2013 set this to True to disable the error catching. This means that the server will die on errors but it can be useful to hook debuggers in (pdb etc.)\r\n-- https://werkzeug.palletsprojects.com/en/2.0.x/serving/\r\n\r\nExample:\r\n```\r\n$ cd contrib/docker\r\n$ docker-compose up --build -d\r\n$ # wait...\r\n$ docker-compose exec ckan bash\r\nroot@f6a71d0b7686:/# python3 -m pdb /usr/lib/ckan/venv/bin/ckan -c /etc/ckan/production.ini run --host 0.0.0.0 -E --disable-reloader\r\n> /usr/lib/ckan/venv/bin/ckan(3)<module>()\r\n-> import re\r\n(Pdb) b ckan/views/api.py:215\r\nBreakpoint 1 at /usr/lib/ckan/venv/src/ckan/ckan/views/api.py:215\r\n(Pdb) c\r\n2021-11-01 17:00:50,832 INFO [ckan.cli] Using configuration file /etc/ckan/production.ini\r\n2021-11-01 17:00:50,832 INFO [ckan.config.environment] Loading static files from public\r\n2021-11-01 17:00:50,954 INFO [ckan.config.environment] Loading templates from /usr/lib/ckan/venv/src/ckan/ckan/templates\r\n2021-11-01 17:00:51,552 INFO [ckan.config.environment] Loading templates from /usr/lib/ckan/venv/src/ckan/ckan/templates\r\n2021-11-01 17:00:52,173 INFO [ckan.cli.server] Running CKAN on http://0.0.0.0:5000\r\n2021-11-01 17:00:52,174 WARNI [werkzeug] * Running on all addresses.\r\n WARNING: This is a development server. 
Do not use it in a production deployment.\r\n```\r\n\r\n...then `http://localhost:5000/api/3/action/package_search` can be opened in the web browser to trigger the breakpoint:\r\n\r\n```\r\n> /usr/lib/ckan/venv/src/ckan/ckan/views/api.py(215)action()\r\n-> try:\r\n(Pdb)\r\n```\r\n\r\n### Features:\r\n\r\n- [ ] includes tests covering changes\r\n- [x] includes updated documentation\r\n- [X] includes user-visible changes\r\n- [ ] includes API changes\r\n- [ ] includes bugfix for possible backport\n", "before_files": [{"content": "# encoding: utf-8\n\nimport logging\n\nimport click\nfrom werkzeug.serving import run_simple\n\nimport ckan.plugins.toolkit as tk\nfrom ckan.common import config\n\nlog = logging.getLogger(__name__)\n\nDEFAULT_HOST = u\"localhost\"\nDEFAULT_PORT = 5000\n\n\[email protected](u\"run\", short_help=u\"Start development server\")\[email protected](u\"-H\", u\"--host\", help=u\"Host name\")\[email protected](u\"-p\", u\"--port\", help=u\"Port number\")\[email protected](u\"-r\", u\"--disable-reloader\", is_flag=True,\n help=u\"Disable reloader\")\[email protected](\n u\"-t\", u\"--threaded\", is_flag=True,\n help=u\"Handle each request in a separate thread\"\n)\[email protected](\n u\"--processes\", type=int, default=0,\n help=u\"Maximum number of concurrent processes\"\n)\[email protected](\n u\"-e\", u\"--extra-files\", multiple=True,\n help=u\"Additional files that should be watched for server reloading\"\n \" (you can provide multiple values)\")\[email protected](\n u\"-C\", u\"--ssl-cert\", default=None,\n help=u\"Certificate file to use to enable SSL. Passing 'adhoc' will \"\n \" automatically generate a new one (on each server reload).\")\[email protected](\n u\"-K\", u\"--ssl-key\", default=None,\n help=u\"Key file to use to enable SSL. 
Passing 'adhoc' will \"\n \" automatically generate a new one (on each server reload).\")\[email protected]_context\ndef run(ctx, host, port, disable_reloader, threaded, extra_files, processes,\n ssl_cert, ssl_key):\n u\"\"\"Runs the Werkzeug development server\"\"\"\n\n # Reloading\n use_reloader = not disable_reloader\n config_extra_files = tk.aslist(\n config.get(u\"ckan.devserver.watch_patterns\")\n )\n extra_files = list(extra_files) + [\n config[u\"__file__\"]\n ] + config_extra_files\n\n # Threads and processes\n threaded = threaded or tk.asbool(config.get(u\"ckan.devserver.threaded\"))\n processes = processes or tk.asint(\n config.get(u\"ckan.devserver.multiprocess\", 1)\n )\n if threaded and processes > 1:\n tk.error_shout(u\"Cannot have a multithreaded and multi process server\")\n raise click.Abort()\n\n # SSL\n cert_file = ssl_cert or config.get(u\"ckan.devserver.ssl_cert\")\n key_file = ssl_key or config.get(u\"ckan.devserver.ssl_key\")\n\n if cert_file and key_file:\n if cert_file == key_file == u\"adhoc\":\n ssl_context = u\"adhoc\"\n else:\n ssl_context = (ssl_cert, ssl_key)\n else:\n ssl_context = None\n\n host = host or config.get(u'ckan.devserver.host', DEFAULT_HOST)\n port = port or config.get(u'ckan.devserver.port', DEFAULT_PORT)\n try:\n port = int(port)\n except ValueError:\n tk.error_shout(u\"Server port must be an integer, not {}\".format(port))\n raise click.Abort()\n\n log.info(u\"Running CKAN on {scheme}://{host}:{port}\".format(\n scheme=u\"https\" if ssl_context else u\"http\", host=host, port=port))\n\n run_simple(\n host,\n port,\n ctx.obj.app,\n use_reloader=use_reloader,\n use_evalex=True,\n threaded=threaded,\n processes=processes,\n extra_files=extra_files,\n ssl_context=ssl_context,\n )\n", "path": "ckan/cli/server.py"}]} | 2,280 | 385 |
gh_patches_debug_6418 | rasdani/github-patches | git_diff | getsentry__sentry-python-3081 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`sentry-sdk[grpcio]` requires `protobuf`
### How do you use Sentry?
Sentry Saas (sentry.io)
### Version
2.2.0
### Steps to Reproduce
See: https://stackoverflow.com/questions/78488561
Followed the Sentry documentation for [gRPC](https://docs.sentry.io/platforms/python/integrations/grpc/)
```bash
python3 -m venv venv
source venv/bin/activate
python3 -m pip install --upgrade 'sentry-sdk[grpcio]'
python3 -m pip freeze
```
```
certifi==2024.2.2
grpcio==1.63.0
sentry-sdk==2.2.0
urllib3==2.2.1
```
`main.py`:
```python3
import grpc
import sentry_sdk
from sentry_sdk.integrations.grpc import GRPCIntegration
sentry_sdk.init(
dsn="https://...",
enable_tracing=True,
integrations=[
GRPCIntegration(),
],
)
with grpc.insecure_channel("example.com:12345") as channel:
print(channel)
```
### Expected Result
Happy path.
### Actual Result
```bash
python3 main.py
Traceback (most recent call last):
File "[redacted]/venv/lib/python3.10/site-packages/sentry_sdk/integrations/grpc/client.py", line 15, in <module>
from google.protobuf.message import Message
ModuleNotFoundError: No module named 'google'
```
`integrations/grpc/client.py`:
```python3
try:
import grpc
from grpc import ClientCallDetails, Call
from grpc._interceptor import _UnaryOutcome
from grpc.aio._interceptor import UnaryStreamCall
from google.protobuf.message import Message <-------------- curious
except ImportError:
raise DidNotEnable("grpcio is not installed")
```
#### Solution
```bash
python3 -m pip install protobuf
python3 -m pip freeze
```
```
certifi==2024.2.2
grpcio==1.63.0
protobuf==5.26.1
sentry-sdk==2.2.0
urllib3==2.2.1
```
Then it works.
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 """
4 Sentry-Python - Sentry SDK for Python
5 =====================================
6
7 **Sentry-Python is an SDK for Sentry.** Check out `GitHub
8 <https://github.com/getsentry/sentry-python>`_ to find out more.
9 """
10
11 import os
12 from setuptools import setup, find_packages
13
14 here = os.path.abspath(os.path.dirname(__file__))
15
16
17 def get_file_text(file_name):
18 with open(os.path.join(here, file_name)) as in_file:
19 return in_file.read()
20
21
22 setup(
23 name="sentry-sdk",
24 version="2.2.0",
25 author="Sentry Team and Contributors",
26     author_email="[email protected]",
27 url="https://github.com/getsentry/sentry-python",
28 project_urls={
29 "Documentation": "https://docs.sentry.io/platforms/python/",
30 "Changelog": "https://github.com/getsentry/sentry-python/blob/master/CHANGELOG.md",
31 },
32 description="Python client for Sentry (https://sentry.io)",
33 long_description=get_file_text("README.md"),
34 long_description_content_type="text/markdown",
35 packages=find_packages(exclude=("tests", "tests.*")),
36 # PEP 561
37 package_data={"sentry_sdk": ["py.typed"]},
38 zip_safe=False,
39 license="MIT",
40 python_requires=">=3.6",
41 install_requires=[
42 "urllib3>=1.26.11",
43 "certifi",
44 ],
45 extras_require={
46 "aiohttp": ["aiohttp>=3.5"],
47 "anthropic": ["anthropic>=0.16"],
48 "arq": ["arq>=0.23"],
49 "asyncpg": ["asyncpg>=0.23"],
50 "beam": ["apache-beam>=2.12"],
51 "bottle": ["bottle>=0.12.13"],
52 "celery": ["celery>=3"],
53 "celery-redbeat": ["celery-redbeat>=2"],
54 "chalice": ["chalice>=1.16.0"],
55 "clickhouse-driver": ["clickhouse-driver>=0.2.0"],
56 "django": ["django>=1.8"],
57 "falcon": ["falcon>=1.4"],
58 "fastapi": ["fastapi>=0.79.0"],
59 "flask": ["flask>=0.11", "blinker>=1.1", "markupsafe"],
60 "grpcio": ["grpcio>=1.21.1"],
61 "httpx": ["httpx>=0.16.0"],
62 "huey": ["huey>=2"],
63 "huggingface_hub": ["huggingface_hub>=0.22"],
64 "langchain": ["langchain>=0.0.210"],
65 "loguru": ["loguru>=0.5"],
66 "openai": ["openai>=1.0.0", "tiktoken>=0.3.0"],
67 "opentelemetry": ["opentelemetry-distro>=0.35b0"],
68 "opentelemetry-experimental": [
69 "opentelemetry-distro~=0.40b0",
70 "opentelemetry-instrumentation-aiohttp-client~=0.40b0",
71 "opentelemetry-instrumentation-django~=0.40b0",
72 "opentelemetry-instrumentation-fastapi~=0.40b0",
73 "opentelemetry-instrumentation-flask~=0.40b0",
74 "opentelemetry-instrumentation-requests~=0.40b0",
75 "opentelemetry-instrumentation-sqlite3~=0.40b0",
76 "opentelemetry-instrumentation-urllib~=0.40b0",
77 ],
78 "pure_eval": ["pure_eval", "executing", "asttokens"],
79 "pymongo": ["pymongo>=3.1"],
80 "pyspark": ["pyspark>=2.4.4"],
81 "quart": ["quart>=0.16.1", "blinker>=1.1"],
82 "rq": ["rq>=0.6"],
83 "sanic": ["sanic>=0.8"],
84 "sqlalchemy": ["sqlalchemy>=1.2"],
85 "starlette": ["starlette>=0.19.1"],
86 "starlite": ["starlite>=1.48"],
87 "tornado": ["tornado>=5"],
88 },
89 classifiers=[
90 "Development Status :: 5 - Production/Stable",
91 "Environment :: Web Environment",
92 "Intended Audience :: Developers",
93 "License :: OSI Approved :: BSD License",
94 "Operating System :: OS Independent",
95 "Programming Language :: Python",
96 "Programming Language :: Python :: 3",
97 "Programming Language :: Python :: 3.6",
98 "Programming Language :: Python :: 3.7",
99 "Programming Language :: Python :: 3.8",
100 "Programming Language :: Python :: 3.9",
101 "Programming Language :: Python :: 3.10",
102 "Programming Language :: Python :: 3.11",
103 "Programming Language :: Python :: 3.12",
104 "Topic :: Software Development :: Libraries :: Python Modules",
105 ],
106 options={"bdist_wheel": {"universal": "1"}},
107 )
108
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -57,7 +57,7 @@
"falcon": ["falcon>=1.4"],
"fastapi": ["fastapi>=0.79.0"],
"flask": ["flask>=0.11", "blinker>=1.1", "markupsafe"],
- "grpcio": ["grpcio>=1.21.1"],
+ "grpcio": ["grpcio>=1.21.1", "protobuf>=3.8.0"],
"httpx": ["httpx>=0.16.0"],
"huey": ["huey>=2"],
"huggingface_hub": ["huggingface_hub>=0.22"],
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -57,7 +57,7 @@\n \"falcon\": [\"falcon>=1.4\"],\n \"fastapi\": [\"fastapi>=0.79.0\"],\n \"flask\": [\"flask>=0.11\", \"blinker>=1.1\", \"markupsafe\"],\n- \"grpcio\": [\"grpcio>=1.21.1\"],\n+ \"grpcio\": [\"grpcio>=1.21.1\", \"protobuf>=3.8.0\"],\n \"httpx\": [\"httpx>=0.16.0\"],\n \"huey\": [\"huey>=2\"],\n \"huggingface_hub\": [\"huggingface_hub>=0.22\"],\n", "issue": "`sentry-sdk[grpcio]` requires `protobuf`\n### How do you use Sentry?\r\n\r\nSentry Saas (sentry.io)\r\n\r\n### Version\r\n\r\n2.2.0\r\n\r\n### Steps to Reproduce\r\n\r\nSee: https://stackoverflow.com/questions/78488561\r\n\r\nFollowed Sentry documentation [gRPC](https://docs.sentry.io/platforms/python/integrations/grpc/)\r\n\r\n```bash\r\npython3 -m venv venv\r\nsource venv/bin/activate\r\npython3 -m pip install --upgrade 'sentry-sdk[grpcio]'\r\n\r\npython3 -m pip freeze\r\n```\r\n```\r\ncertifi==2024.2.2\r\ngrpcio==1.63.0\r\nsentry-sdk==2.2.0\r\nurllib3==2.2.1\r\n```\r\n`main.py`:\r\n```python3\r\nimport grpc\r\n\r\nimport sentry_sdk\r\nfrom sentry_sdk.integrations.grpc import GRPCIntegration\r\n\r\nsentry_sdk.init(\r\n dsn=\"https://...\",\r\n enable_tracing=True,\r\n integrations=[\r\n GRPCIntegration(),\r\n ],\r\n)\r\n\r\nwith grpc.insecure_channel(\"example.com:12345\") as channel:\r\n print(channel)\r\n```\r\n\r\n\r\n### Expected Result\r\n\r\nHappy path.\r\n\r\n### Actual Result\r\n\r\n```bash\r\npython3 main.py\r\nTraceback (most recent call last):\r\n File \"[redacted]/venv/lib/python3.10/site-packages/sentry_sdk/integrations/grpc/client.py\", line 15, in <module>\r\n from google.protobuf.message import Message\r\nModuleNotFoundError: No module named 'google'\r\n```\r\n`integrations/grpc/client.py`:\r\n```python3\r\ntry:\r\n import grpc\r\n from grpc import ClientCallDetails, Call\r\n from grpc._interceptor import _UnaryOutcome\r\n from grpc.aio._interceptor import UnaryStreamCall\r\n from google.protobuf.message import Message <-------------- curious\r\nexcept ImportError:\r\n raise DidNotEnable(\"grpcio is not installed\")\r\n```\r\n\r\n#### Solution\r\n\r\n```bash\r\npython3 -m pip install protobuf\r\npython3 -m pip freeze\r\n```\r\n```\r\ncertifi==2024.2.2\r\ngrpcio==1.63.0\r\nprotobuf==5.26.1\r\nsentry-sdk==2.2.0\r\nurllib3==2.2.1\r\n```\r\nThen works.\n", "before_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"\nSentry-Python - Sentry SDK for Python\n=====================================\n\n**Sentry-Python is an SDK for Sentry.** Check out `GitHub\n<https://github.com/getsentry/sentry-python>`_ to find out more.\n\"\"\"\n\nimport os\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef get_file_text(file_name):\n with open(os.path.join(here, file_name)) as in_file:\n return in_file.read()\n\n\nsetup(\n name=\"sentry-sdk\",\n version=\"2.2.0\",\n author=\"Sentry Team and Contributors\",\n author_email=\"[email protected]\",\n url=\"https://github.com/getsentry/sentry-python\",\n project_urls={\n \"Documentation\": \"https://docs.sentry.io/platforms/python/\",\n \"Changelog\": \"https://github.com/getsentry/sentry-python/blob/master/CHANGELOG.md\",\n },\n description=\"Python client for Sentry (https://sentry.io)\",\n long_description=get_file_text(\"README.md\"),\n long_description_content_type=\"text/markdown\",\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n # PEP 561\n package_data={\"sentry_sdk\": [\"py.typed\"]},\n zip_safe=False,\n 
license=\"MIT\",\n python_requires=\">=3.6\",\n install_requires=[\n \"urllib3>=1.26.11\",\n \"certifi\",\n ],\n extras_require={\n \"aiohttp\": [\"aiohttp>=3.5\"],\n \"anthropic\": [\"anthropic>=0.16\"],\n \"arq\": [\"arq>=0.23\"],\n \"asyncpg\": [\"asyncpg>=0.23\"],\n \"beam\": [\"apache-beam>=2.12\"],\n \"bottle\": [\"bottle>=0.12.13\"],\n \"celery\": [\"celery>=3\"],\n \"celery-redbeat\": [\"celery-redbeat>=2\"],\n \"chalice\": [\"chalice>=1.16.0\"],\n \"clickhouse-driver\": [\"clickhouse-driver>=0.2.0\"],\n \"django\": [\"django>=1.8\"],\n \"falcon\": [\"falcon>=1.4\"],\n \"fastapi\": [\"fastapi>=0.79.0\"],\n \"flask\": [\"flask>=0.11\", \"blinker>=1.1\", \"markupsafe\"],\n \"grpcio\": [\"grpcio>=1.21.1\"],\n \"httpx\": [\"httpx>=0.16.0\"],\n \"huey\": [\"huey>=2\"],\n \"huggingface_hub\": [\"huggingface_hub>=0.22\"],\n \"langchain\": [\"langchain>=0.0.210\"],\n \"loguru\": [\"loguru>=0.5\"],\n \"openai\": [\"openai>=1.0.0\", \"tiktoken>=0.3.0\"],\n \"opentelemetry\": [\"opentelemetry-distro>=0.35b0\"],\n \"opentelemetry-experimental\": [\n \"opentelemetry-distro~=0.40b0\",\n \"opentelemetry-instrumentation-aiohttp-client~=0.40b0\",\n \"opentelemetry-instrumentation-django~=0.40b0\",\n \"opentelemetry-instrumentation-fastapi~=0.40b0\",\n \"opentelemetry-instrumentation-flask~=0.40b0\",\n \"opentelemetry-instrumentation-requests~=0.40b0\",\n \"opentelemetry-instrumentation-sqlite3~=0.40b0\",\n \"opentelemetry-instrumentation-urllib~=0.40b0\",\n ],\n \"pure_eval\": [\"pure_eval\", \"executing\", \"asttokens\"],\n \"pymongo\": [\"pymongo>=3.1\"],\n \"pyspark\": [\"pyspark>=2.4.4\"],\n \"quart\": [\"quart>=0.16.1\", \"blinker>=1.1\"],\n \"rq\": [\"rq>=0.6\"],\n \"sanic\": [\"sanic>=0.8\"],\n \"sqlalchemy\": [\"sqlalchemy>=1.2\"],\n \"starlette\": [\"starlette>=0.19.1\"],\n \"starlite\": [\"starlite>=1.48\"],\n \"tornado\": [\"tornado>=5\"],\n },\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: 3.12\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n options={\"bdist_wheel\": {\"universal\": \"1\"}},\n)\n", "path": "setup.py"}]} | 2,416 | 176 |
gh_patches_debug_21049 | rasdani/github-patches | git_diff | aws__aws-cli-461 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
No way to specify all for --protocol option
The `aws ec2 authorize-security-group-ingress` and `aws ec2 authorize-security-group-egress` commands both accept a `--protocol` option but there is no way to supply a value that represents ALL protocols.
</issue>
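For reference, the EC2 API itself represents "all protocols" with the special `IpProtocol` value `"-1"`, and the customization shown below already accepts a structured `--ip-permissions` option, so the behaviour is expressible today even though the scalar `--protocol` flag rejects it. A minimal sketch of that workaround follows; the security-group ID and CIDR are placeholders, not values taken from this issue:

```python
# Sketch only: build an --ip-permissions payload that means "all protocols".
# "-1" is the EC2 wildcard protocol value; the group ID and CIDR below are made up.
import json

ip_permissions = [
    {
        "IpProtocol": "-1",                          # all protocols
        "IpRanges": [{"CidrIp": "203.0.113.0/24"}],  # illustrative CIDR
    }
]
print(json.dumps(ip_permissions))
# The printed JSON can then be passed as:
#   aws ec2 authorize-security-group-ingress --group-id sg-12345678 --ip-permissions '<that JSON>'
```

The patch applied further down instead teaches the scalar `--protocol` option to accept `all` and translate it to `"-1"`.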
<code>
[start of awscli/customizations/ec2secgroupsimplify.py]
1 # Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13 """
14 This customization adds the following scalar parameters to the
15 authorize operations:
16
17 * --protocol: tcp | udp | icmp or any protocol number
18 * --port: A single integer or a range (min-max). You can specify ``all``
19 to mean all ports (for example, port range 0-65535)
20 * --source-group-name
21 * --source-group-id
22 * --cidr - The CIDR range. Cannot be used when specifying a source or
23 destination security group.
24 """
25
26 from awscli.arguments import CustomArgument
27
28
29 def _add_params(argument_table, operation, **kwargs):
30 arg = ProtocolArgument('protocol',
31 help_text=PROTOCOL_DOCS)
32 argument_table['protocol'] = arg
33 arg = PortArgument('port', help_text=PORT_DOCS)
34 argument_table['port'] = arg
35 arg = CidrArgument('cidr', help_text=CIDR_DOCS)
36 argument_table['cidr'] = arg
37 arg = SourceGroupArgument('source-group',
38 help_text=SOURCEGROUP_DOCS)
39 argument_table['source-group'] = arg
40 arg = GroupOwnerArgument('group-owner',
41 help_text=GROUPOWNER_DOCS)
42 argument_table['group-owner'] = arg
43
44
45 def _check_args(parsed_args, **kwargs):
46 # This function checks the parsed args. If the user specified
47 # the --ip-permissions option with any of the scalar options we
48 # raise an error.
49 arg_dict = vars(parsed_args)
50 if arg_dict['ip_permissions']:
51 for key in ('protocol', 'port', 'cidr',
52 'source_group', 'group_owner'):
53 if arg_dict[key]:
54 msg = ('The --%s option is not compatible '
55 'with the --ip-permissions option ') % key
56 raise ValueError(msg)
57
58 def _add_docs(help_command, **kwargs):
59 doc = help_command.doc
60 doc.style.new_paragraph()
61 doc.style.start_note()
62 msg = ('To specify multiple rules in a single command '
63 'use the <code>--ip-permissions</code> option')
64 doc.include_doc_string(msg)
65 doc.style.end_note()
66
67
68 EVENTS = [
69 ('building-argument-table.ec2.authorize-security-group-ingress', _add_params),
70 ('building-argument-table.ec2.authorize-security-group-egress', _add_params),
71 ('building-argument-table.ec2.revoke-security-group-ingress', _add_params),
72 ('building-argument-table.ec2.revoke-security-group-egress', _add_params),
73 ('operation-args-parsed.ec2.authorize-security-group-ingress', _check_args),
74 ('operation-args-parsed.ec2.authorize-security-group-egress', _check_args),
75 ('operation-args-parsed.ec2.revoke-security-group-ingress', _check_args),
76 ('operation-args-parsed.ec2.revoke-security-group-egress', _check_args),
77 ('doc-description.ec2.authorize-security-group-ingress', _add_docs),
78 ('doc-description.ec2.authorize-security-group-egress', _add_docs),
79 ('doc-description.ec2.revoke-security-group-ingress', _add_docs),
80 ('doc-description.ec2.revoke-security-groupdoc-ingress', _add_docs),
81 ]
82 PROTOCOL_DOCS = ('<p>The IP protocol of this permission.</p>'
83 '<p>Valid protocol values: <code>tcp</code>, '
84 '<code>udp</code>, <code>icmp</code></p>')
85 PORT_DOCS = ('<p>For TCP or UDP: The range of ports to allow.'
86 ' A single integer or a range (min-max). You can '
87 'specify <code>all</code> to mean all ports</p>')
88 CIDR_DOCS = '<p>The CIDR IP range.</p>'
89 SOURCEGROUP_DOCS = ('<p>The name of the source security group. '
90 'Cannot be used when specifying a CIDR IP address.')
91 GROUPOWNER_DOCS = ('<p>The AWS account ID that owns the source security '
92 'group. Cannot be used when specifying a CIDR IP '
93 'address.</p>')
94
95 def register_secgroup(event_handler):
96 for event, handler in EVENTS:
97 event_handler.register(event, handler)
98
99
100 def _build_ip_permissions(params, key, value):
101 if 'ip_permissions' not in params:
102 params['ip_permissions'] = [{}]
103 if key == 'CidrIp':
104 if 'IpRanges' not in params['ip_permissions'][0]:
105 params['ip_permissions'][0]['IpRanges'] = []
106 params['ip_permissions'][0]['IpRanges'].append(value)
107 elif key in ('GroupId', 'GroupName', 'UserId'):
108 if 'UserIdGroupPairs' not in params['ip_permissions'][0]:
109 params['ip_permissions'][0]['UserIdGroupPairs'] = [{}]
110 params['ip_permissions'][0]['UserIdGroupPairs'][0][key] = value
111 else:
112 params['ip_permissions'][0][key] = value
113
114
115 class ProtocolArgument(CustomArgument):
116
117 def add_to_params(self, parameters, value):
118 if value:
119 try:
120 int_value = int(value)
121 if int_value < 0 or int_value > 255:
122 msg = ('protocol numbers must be in the range 0-255')
123 raise ValueError(msg)
124 except ValueError:
125 if value not in ('tcp', 'udp', 'icmp'):
126 msg = ('protocol parameter should be one of: '
127 'tcp|udp|icmp or any valid protocol number.')
128 raise ValueError(msg)
129 _build_ip_permissions(parameters, 'IpProtocol', value)
130
131
132 class PortArgument(CustomArgument):
133
134 def add_to_params(self, parameters, value):
135 if value:
136 try:
137 if value == '-1' or value == 'all':
138 fromstr = '-1'
139 tostr = '-1'
140 elif '-' in value:
141 fromstr, tostr = value.split('-')
142 else:
143 fromstr, tostr = (value, value)
144 _build_ip_permissions(parameters, 'FromPort', int(fromstr))
145 _build_ip_permissions(parameters, 'ToPort', int(tostr))
146 except ValueError:
147 msg = ('port parameter should be of the '
148 'form <from[-to]> (e.g. 22 or 22-25)')
149 raise ValueError(msg)
150
151
152 class CidrArgument(CustomArgument):
153
154 def add_to_params(self, parameters, value):
155 if value:
156 value = [{'CidrIp': value}]
157 _build_ip_permissions(parameters, 'IpRanges', value)
158
159
160 class SourceGroupArgument(CustomArgument):
161
162 def add_to_params(self, parameters, value):
163 if value:
164 if value.startswith('sg-'):
165 _build_ip_permissions(parameters, 'GroupId', value)
166 else:
167 _build_ip_permissions(parameters, 'GroupName', value)
168
169
170 class GroupOwnerArgument(CustomArgument):
171
172 def add_to_params(self, parameters, value):
173 if value:
174 _build_ip_permissions(parameters, 'UserId', value)
175
[end of awscli/customizations/ec2secgroupsimplify.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/awscli/customizations/ec2secgroupsimplify.py b/awscli/customizations/ec2secgroupsimplify.py
--- a/awscli/customizations/ec2secgroupsimplify.py
+++ b/awscli/customizations/ec2secgroupsimplify.py
@@ -118,14 +118,17 @@
if value:
try:
int_value = int(value)
- if int_value < 0 or int_value > 255:
- msg = ('protocol numbers must be in the range 0-255')
+ if (int_value < 0 or int_value > 255) and int_value != -1:
+ msg = ('protocol numbers must be in the range 0-255 '
+ 'or -1 to specify all protocols')
raise ValueError(msg)
except ValueError:
- if value not in ('tcp', 'udp', 'icmp'):
+ if value not in ('tcp', 'udp', 'icmp', 'all'):
msg = ('protocol parameter should be one of: '
- 'tcp|udp|icmp or any valid protocol number.')
+ 'tcp|udp|icmp|all or any valid protocol number.')
raise ValueError(msg)
+ if value == 'all':
+ value = '-1'
_build_ip_permissions(parameters, 'IpProtocol', value)
| {"golden_diff": "diff --git a/awscli/customizations/ec2secgroupsimplify.py b/awscli/customizations/ec2secgroupsimplify.py\n--- a/awscli/customizations/ec2secgroupsimplify.py\n+++ b/awscli/customizations/ec2secgroupsimplify.py\n@@ -118,14 +118,17 @@\n if value:\n try:\n int_value = int(value)\n- if int_value < 0 or int_value > 255:\n- msg = ('protocol numbers must be in the range 0-255')\n+ if (int_value < 0 or int_value > 255) and int_value != -1:\n+ msg = ('protocol numbers must be in the range 0-255 '\n+ 'or -1 to specify all protocols')\n raise ValueError(msg)\n except ValueError:\n- if value not in ('tcp', 'udp', 'icmp'):\n+ if value not in ('tcp', 'udp', 'icmp', 'all'):\n msg = ('protocol parameter should be one of: '\n- 'tcp|udp|icmp or any valid protocol number.')\n+ 'tcp|udp|icmp|all or any valid protocol number.')\n raise ValueError(msg)\n+ if value == 'all':\n+ value = '-1'\n _build_ip_permissions(parameters, 'IpProtocol', value)\n", "issue": "No way to specify all for --protocol option\nThe `aws ec2 authorize-security-group-ingress` and `aws ec2 authorize-security-group-egress` commands both accept a `--protocol` option but there is no way to supply a value that represents ALL protocols.\n\n", "before_files": [{"content": "# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\n\"\"\"\nThis customization adds the following scalar parameters to the\nauthorize operations:\n\n* --protocol: tcp | udp | icmp or any protocol number\n* --port: A single integer or a range (min-max). You can specify ``all``\n to mean all ports (for example, port range 0-65535)\n* --source-group-name\n* --source-group-id\n* --cidr - The CIDR range. Cannot be used when specifying a source or\n destination security group.\n\"\"\"\n\nfrom awscli.arguments import CustomArgument\n\n\ndef _add_params(argument_table, operation, **kwargs):\n arg = ProtocolArgument('protocol',\n help_text=PROTOCOL_DOCS)\n argument_table['protocol'] = arg\n arg = PortArgument('port', help_text=PORT_DOCS)\n argument_table['port'] = arg\n arg = CidrArgument('cidr', help_text=CIDR_DOCS)\n argument_table['cidr'] = arg\n arg = SourceGroupArgument('source-group',\n help_text=SOURCEGROUP_DOCS)\n argument_table['source-group'] = arg\n arg = GroupOwnerArgument('group-owner',\n help_text=GROUPOWNER_DOCS)\n argument_table['group-owner'] = arg\n\n\ndef _check_args(parsed_args, **kwargs):\n # This function checks the parsed args. 
If the user specified\n # the --ip-permissions option with any of the scalar options we\n # raise an error.\n arg_dict = vars(parsed_args)\n if arg_dict['ip_permissions']:\n for key in ('protocol', 'port', 'cidr',\n 'source_group', 'group_owner'):\n if arg_dict[key]:\n msg = ('The --%s option is not compatible '\n 'with the --ip-permissions option ') % key\n raise ValueError(msg)\n\ndef _add_docs(help_command, **kwargs):\n doc = help_command.doc\n doc.style.new_paragraph()\n doc.style.start_note()\n msg = ('To specify multiple rules in a single command '\n 'use the <code>--ip-permissions</code> option')\n doc.include_doc_string(msg)\n doc.style.end_note()\n\n\nEVENTS = [\n ('building-argument-table.ec2.authorize-security-group-ingress', _add_params),\n ('building-argument-table.ec2.authorize-security-group-egress', _add_params),\n ('building-argument-table.ec2.revoke-security-group-ingress', _add_params),\n ('building-argument-table.ec2.revoke-security-group-egress', _add_params),\n ('operation-args-parsed.ec2.authorize-security-group-ingress', _check_args),\n ('operation-args-parsed.ec2.authorize-security-group-egress', _check_args),\n ('operation-args-parsed.ec2.revoke-security-group-ingress', _check_args),\n ('operation-args-parsed.ec2.revoke-security-group-egress', _check_args),\n ('doc-description.ec2.authorize-security-group-ingress', _add_docs),\n ('doc-description.ec2.authorize-security-group-egress', _add_docs),\n ('doc-description.ec2.revoke-security-group-ingress', _add_docs),\n ('doc-description.ec2.revoke-security-groupdoc-ingress', _add_docs),\n ]\nPROTOCOL_DOCS = ('<p>The IP protocol of this permission.</p>'\n '<p>Valid protocol values: <code>tcp</code>, '\n '<code>udp</code>, <code>icmp</code></p>')\nPORT_DOCS = ('<p>For TCP or UDP: The range of ports to allow.'\n ' A single integer or a range (min-max). You can '\n 'specify <code>all</code> to mean all ports</p>')\nCIDR_DOCS = '<p>The CIDR IP range.</p>'\nSOURCEGROUP_DOCS = ('<p>The name of the source security group. '\n 'Cannot be used when specifying a CIDR IP address.')\nGROUPOWNER_DOCS = ('<p>The AWS account ID that owns the source security '\n 'group. 
Cannot be used when specifying a CIDR IP '\n 'address.</p>')\n\ndef register_secgroup(event_handler):\n for event, handler in EVENTS:\n event_handler.register(event, handler)\n\n\ndef _build_ip_permissions(params, key, value):\n if 'ip_permissions' not in params:\n params['ip_permissions'] = [{}]\n if key == 'CidrIp':\n if 'IpRanges' not in params['ip_permissions'][0]:\n params['ip_permissions'][0]['IpRanges'] = []\n params['ip_permissions'][0]['IpRanges'].append(value)\n elif key in ('GroupId', 'GroupName', 'UserId'):\n if 'UserIdGroupPairs' not in params['ip_permissions'][0]:\n params['ip_permissions'][0]['UserIdGroupPairs'] = [{}]\n params['ip_permissions'][0]['UserIdGroupPairs'][0][key] = value\n else:\n params['ip_permissions'][0][key] = value\n\n\nclass ProtocolArgument(CustomArgument):\n\n def add_to_params(self, parameters, value):\n if value:\n try:\n int_value = int(value)\n if int_value < 0 or int_value > 255:\n msg = ('protocol numbers must be in the range 0-255')\n raise ValueError(msg)\n except ValueError:\n if value not in ('tcp', 'udp', 'icmp'):\n msg = ('protocol parameter should be one of: '\n 'tcp|udp|icmp or any valid protocol number.')\n raise ValueError(msg)\n _build_ip_permissions(parameters, 'IpProtocol', value)\n\n\nclass PortArgument(CustomArgument):\n\n def add_to_params(self, parameters, value):\n if value:\n try:\n if value == '-1' or value == 'all':\n fromstr = '-1'\n tostr = '-1'\n elif '-' in value:\n fromstr, tostr = value.split('-')\n else:\n fromstr, tostr = (value, value)\n _build_ip_permissions(parameters, 'FromPort', int(fromstr))\n _build_ip_permissions(parameters, 'ToPort', int(tostr))\n except ValueError:\n msg = ('port parameter should be of the '\n 'form <from[-to]> (e.g. 22 or 22-25)')\n raise ValueError(msg)\n\n\nclass CidrArgument(CustomArgument):\n\n def add_to_params(self, parameters, value):\n if value:\n value = [{'CidrIp': value}]\n _build_ip_permissions(parameters, 'IpRanges', value)\n\n\nclass SourceGroupArgument(CustomArgument):\n\n def add_to_params(self, parameters, value):\n if value:\n if value.startswith('sg-'):\n _build_ip_permissions(parameters, 'GroupId', value)\n else:\n _build_ip_permissions(parameters, 'GroupName', value)\n\n\nclass GroupOwnerArgument(CustomArgument):\n\n def add_to_params(self, parameters, value):\n if value:\n _build_ip_permissions(parameters, 'UserId', value)\n", "path": "awscli/customizations/ec2secgroupsimplify.py"}]} | 2,683 | 290 |
gh_patches_debug_15523 | rasdani/github-patches | git_diff | docker__docker-py-1629 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Question about stream flag when building image
[This section of the docs](https://docker-py.readthedocs.io/en/stable/images.html#docker.models.images.ImageCollection.build) says:
> **stream (bool)** – Deprecated for API version > 1.8 (always True). Return a blocking generator you can iterate over to retrieve build output as it happens
However, if I attempt to run something like:
```py
docker.images.build(path='docker/example/', tag='example', stream=True)
```
It blocks until the image is built, and only returns the image after the build has finished.
What's a "blocking generator" and how is it supposed to be used?
Thanks!
</issue>
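A "blocking generator" here is simply a generator that yields build-output chunks as the daemon produces them and blocks in between; the high-level `images.build()` shown in the code below consumes that generator internally and only returns once it is exhausted, which is why the call appears to hang. To see output while the build runs, iterate over the low-level API yourself. The sketch below assumes a reachable Docker daemon and uses a placeholder build context and tag; `decode=True` is the documented way to receive dict chunks:

```python
# Sketch: stream build output via the low-level APIClient instead of ImageCollection.build().
import docker

client = docker.from_env()

# client.api is the low-level APIClient; its build() returns the streaming generator
# that the high-level wrapper normally drains for you.
for chunk in client.api.build(path="docker/example/", tag="example", decode=True):
    if "stream" in chunk:
        print(chunk["stream"], end="")       # build output as it happens
    elif "error" in chunk:
        raise RuntimeError(chunk["error"])   # surface build failures
```

The golden patch further down only removes the stale `stream` documentation from the high-level method's docstring.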
<code>
[start of docker/models/images.py]
1 import re
2
3 import six
4
5 from ..api import APIClient
6 from ..errors import BuildError
7 from ..utils.json_stream import json_stream
8 from .resource import Collection, Model
9
10
11 class Image(Model):
12 """
13 An image on the server.
14 """
15 def __repr__(self):
16 return "<%s: '%s'>" % (self.__class__.__name__, "', '".join(self.tags))
17
18 @property
19 def labels(self):
20 """
21 The labels of an image as dictionary.
22 """
23 result = self.attrs['Config'].get('Labels')
24 return result or {}
25
26 @property
27 def short_id(self):
28 """
29 The ID of the image truncated to 10 characters, plus the ``sha256:``
30 prefix.
31 """
32 if self.id.startswith('sha256:'):
33 return self.id[:17]
34 return self.id[:10]
35
36 @property
37 def tags(self):
38 """
39 The image's tags.
40 """
41 tags = self.attrs.get('RepoTags')
42 if tags is None:
43 tags = []
44 return [tag for tag in tags if tag != '<none>:<none>']
45
46 def history(self):
47 """
48 Show the history of an image.
49
50 Returns:
51 (str): The history of the image.
52
53 Raises:
54 :py:class:`docker.errors.APIError`
55 If the server returns an error.
56 """
57 return self.client.api.history(self.id)
58
59 def save(self):
60 """
61 Get a tarball of an image. Similar to the ``docker save`` command.
62
63 Returns:
64 (urllib3.response.HTTPResponse object): The response from the
65 daemon.
66
67 Raises:
68 :py:class:`docker.errors.APIError`
69 If the server returns an error.
70
71 Example:
72
73 >>> image = cli.images.get("fedora:latest")
74 >>> resp = image.save()
75 >>> f = open('/tmp/fedora-latest.tar', 'w')
76 >>> for chunk in resp.stream():
77 >>> f.write(chunk)
78 >>> f.close()
79 """
80 return self.client.api.get_image(self.id)
81
82 def tag(self, repository, tag=None, **kwargs):
83 """
84 Tag this image into a repository. Similar to the ``docker tag``
85 command.
86
87 Args:
88 repository (str): The repository to set for the tag
89 tag (str): The tag name
90 force (bool): Force
91
92 Raises:
93 :py:class:`docker.errors.APIError`
94 If the server returns an error.
95
96 Returns:
97 (bool): ``True`` if successful
98 """
99 self.client.api.tag(self.id, repository, tag=tag, **kwargs)
100
101
102 class ImageCollection(Collection):
103 model = Image
104
105 def build(self, **kwargs):
106 """
107 Build an image and return it. Similar to the ``docker build``
108 command. Either ``path`` or ``fileobj`` must be set.
109
110 If you have a tar file for the Docker build context (including a
111 Dockerfile) already, pass a readable file-like object to ``fileobj``
112 and also pass ``custom_context=True``. If the stream is compressed
113 also, set ``encoding`` to the correct value (e.g ``gzip``).
114
115 If you want to get the raw output of the build, use the
116 :py:meth:`~docker.api.build.BuildApiMixin.build` method in the
117 low-level API.
118
119 Args:
120 path (str): Path to the directory containing the Dockerfile
121 fileobj: A file object to use as the Dockerfile. (Or a file-like
122 object)
123 tag (str): A tag to add to the final image
124 quiet (bool): Whether to return the status
125 nocache (bool): Don't use the cache when set to ``True``
126 rm (bool): Remove intermediate containers. The ``docker build``
127 command now defaults to ``--rm=true``, but we have kept the old
128 default of `False` to preserve backward compatibility
129 stream (bool): *Deprecated for API version > 1.8 (always True)*.
130 Return a blocking generator you can iterate over to retrieve
131 build output as it happens
132 timeout (int): HTTP timeout
133 custom_context (bool): Optional if using ``fileobj``
134 encoding (str): The encoding for a stream. Set to ``gzip`` for
135 compressing
136 pull (bool): Downloads any updates to the FROM image in Dockerfiles
137 forcerm (bool): Always remove intermediate containers, even after
138 unsuccessful builds
139 dockerfile (str): path within the build context to the Dockerfile
140 buildargs (dict): A dictionary of build arguments
141 container_limits (dict): A dictionary of limits applied to each
142 container created by the build process. Valid keys:
143
144 - memory (int): set memory limit for build
145 - memswap (int): Total memory (memory + swap), -1 to disable
146 swap
147 - cpushares (int): CPU shares (relative weight)
148 - cpusetcpus (str): CPUs in which to allow execution, e.g.,
149 ``"0-3"``, ``"0,1"``
150 decode (bool): If set to ``True``, the returned stream will be
151 decoded into dicts on the fly. Default ``False``.
152 cache_from (list): A list of images used for build cache
153 resolution.
154 target (str): Name of the build-stage to build in a multi-stage
155 Dockerfile.
156
157 Returns:
158 (:py:class:`Image`): The built image.
159
160 Raises:
161 :py:class:`docker.errors.BuildError`
162 If there is an error during the build.
163 :py:class:`docker.errors.APIError`
164 If the server returns any other error.
165 ``TypeError``
166 If neither ``path`` nor ``fileobj`` is specified.
167 """
168 resp = self.client.api.build(**kwargs)
169 if isinstance(resp, six.string_types):
170 return self.get(resp)
171 last_event = None
172 for chunk in json_stream(resp):
173 if 'error' in chunk:
174 raise BuildError(chunk['error'])
175 if 'stream' in chunk:
176 match = re.search(
177 r'(Successfully built |sha256:)([0-9a-f]+)',
178 chunk['stream']
179 )
180 if match:
181 image_id = match.group(2)
182 return self.get(image_id)
183 last_event = chunk
184
185 raise BuildError(last_event or 'Unknown')
186
187 def get(self, name):
188 """
189 Gets an image.
190
191 Args:
192 name (str): The name of the image.
193
194 Returns:
195 (:py:class:`Image`): The image.
196
197 Raises:
198 :py:class:`docker.errors.ImageNotFound`
199 If the image does not exist.
200 :py:class:`docker.errors.APIError`
201 If the server returns an error.
202 """
203 return self.prepare_model(self.client.api.inspect_image(name))
204
205 def list(self, name=None, all=False, filters=None):
206 """
207 List images on the server.
208
209 Args:
210 name (str): Only show images belonging to the repository ``name``
211 all (bool): Show intermediate image layers. By default, these are
212 filtered out.
213 filters (dict): Filters to be processed on the image list.
214 Available filters:
215 - ``dangling`` (bool)
216 - ``label`` (str): format either ``key`` or ``key=value``
217
218 Returns:
219 (list of :py:class:`Image`): The images.
220
221 Raises:
222 :py:class:`docker.errors.APIError`
223 If the server returns an error.
224 """
225 resp = self.client.api.images(name=name, all=all, filters=filters)
226 return [self.prepare_model(r) for r in resp]
227
228 def load(self, data):
229 """
230 Load an image that was previously saved using
231 :py:meth:`~docker.models.images.Image.save` (or ``docker save``).
232 Similar to ``docker load``.
233
234 Args:
235 data (binary): Image data to be loaded.
236
237 Raises:
238 :py:class:`docker.errors.APIError`
239 If the server returns an error.
240 """
241 return self.client.api.load_image(data)
242
243 def pull(self, name, tag=None, **kwargs):
244 """
245 Pull an image of the given name and return it. Similar to the
246 ``docker pull`` command.
247
248 If you want to get the raw pull output, use the
249 :py:meth:`~docker.api.image.ImageApiMixin.pull` method in the
250 low-level API.
251
252 Args:
253 repository (str): The repository to pull
254 tag (str): The tag to pull
255 insecure_registry (bool): Use an insecure registry
256 auth_config (dict): Override the credentials that
257 :py:meth:`~docker.client.DockerClient.login` has set for
258 this request. ``auth_config`` should contain the ``username``
259 and ``password`` keys to be valid.
260
261 Returns:
262 (:py:class:`Image`): The image that has been pulled.
263
264 Raises:
265 :py:class:`docker.errors.APIError`
266 If the server returns an error.
267
268 Example:
269
270 >>> image = client.images.pull('busybox')
271 """
272 self.client.api.pull(name, tag=tag, **kwargs)
273 return self.get('{0}:{1}'.format(name, tag) if tag else name)
274
275 def push(self, repository, tag=None, **kwargs):
276 return self.client.api.push(repository, tag=tag, **kwargs)
277 push.__doc__ = APIClient.push.__doc__
278
279 def remove(self, *args, **kwargs):
280 self.client.api.remove_image(*args, **kwargs)
281 remove.__doc__ = APIClient.remove_image.__doc__
282
283 def search(self, *args, **kwargs):
284 return self.client.api.search(*args, **kwargs)
285 search.__doc__ = APIClient.search.__doc__
286
287 def prune(self, filters=None):
288 return self.client.api.prune_images(filters=filters)
289 prune.__doc__ = APIClient.prune_images.__doc__
290
[end of docker/models/images.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docker/models/images.py b/docker/models/images.py
--- a/docker/models/images.py
+++ b/docker/models/images.py
@@ -126,9 +126,6 @@
rm (bool): Remove intermediate containers. The ``docker build``
command now defaults to ``--rm=true``, but we have kept the old
default of `False` to preserve backward compatibility
- stream (bool): *Deprecated for API version > 1.8 (always True)*.
- Return a blocking generator you can iterate over to retrieve
- build output as it happens
timeout (int): HTTP timeout
custom_context (bool): Optional if using ``fileobj``
encoding (str): The encoding for a stream. Set to ``gzip`` for
| {"golden_diff": "diff --git a/docker/models/images.py b/docker/models/images.py\n--- a/docker/models/images.py\n+++ b/docker/models/images.py\n@@ -126,9 +126,6 @@\n rm (bool): Remove intermediate containers. The ``docker build``\n command now defaults to ``--rm=true``, but we have kept the old\n default of `False` to preserve backward compatibility\n- stream (bool): *Deprecated for API version > 1.8 (always True)*.\n- Return a blocking generator you can iterate over to retrieve\n- build output as it happens\n timeout (int): HTTP timeout\n custom_context (bool): Optional if using ``fileobj``\n encoding (str): The encoding for a stream. Set to ``gzip`` for\n", "issue": "Question about stream flag when building image\n[This section of the docs](https://docker-py.readthedocs.io/en/stable/images.html#docker.models.images.ImageCollection.build) says:\r\n\r\n> **stream (bool)** \u2013 Deprecated for API version > 1.8 (always True). Return a blocking generator you can iterate over to retrieve build output as it happens\r\n\r\nHowever, if I attempt to run something like:\r\n\r\n```py\r\ndocker.images.build(path='docker/example/', tag='example', stream=True)\r\n```\r\n\r\nIt is blocked until the image is built, and returns the image then.\r\n\r\nWhat's a \"blocking generator\" and how is it supposed to be used?\r\n\r\nThanks!\n", "before_files": [{"content": "import re\n\nimport six\n\nfrom ..api import APIClient\nfrom ..errors import BuildError\nfrom ..utils.json_stream import json_stream\nfrom .resource import Collection, Model\n\n\nclass Image(Model):\n \"\"\"\n An image on the server.\n \"\"\"\n def __repr__(self):\n return \"<%s: '%s'>\" % (self.__class__.__name__, \"', '\".join(self.tags))\n\n @property\n def labels(self):\n \"\"\"\n The labels of an image as dictionary.\n \"\"\"\n result = self.attrs['Config'].get('Labels')\n return result or {}\n\n @property\n def short_id(self):\n \"\"\"\n The ID of the image truncated to 10 characters, plus the ``sha256:``\n prefix.\n \"\"\"\n if self.id.startswith('sha256:'):\n return self.id[:17]\n return self.id[:10]\n\n @property\n def tags(self):\n \"\"\"\n The image's tags.\n \"\"\"\n tags = self.attrs.get('RepoTags')\n if tags is None:\n tags = []\n return [tag for tag in tags if tag != '<none>:<none>']\n\n def history(self):\n \"\"\"\n Show the history of an image.\n\n Returns:\n (str): The history of the image.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n return self.client.api.history(self.id)\n\n def save(self):\n \"\"\"\n Get a tarball of an image. Similar to the ``docker save`` command.\n\n Returns:\n (urllib3.response.HTTPResponse object): The response from the\n daemon.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n\n Example:\n\n >>> image = cli.images.get(\"fedora:latest\")\n >>> resp = image.save()\n >>> f = open('/tmp/fedora-latest.tar', 'w')\n >>> for chunk in resp.stream():\n >>> f.write(chunk)\n >>> f.close()\n \"\"\"\n return self.client.api.get_image(self.id)\n\n def tag(self, repository, tag=None, **kwargs):\n \"\"\"\n Tag this image into a repository. 
Similar to the ``docker tag``\n command.\n\n Args:\n repository (str): The repository to set for the tag\n tag (str): The tag name\n force (bool): Force\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n\n Returns:\n (bool): ``True`` if successful\n \"\"\"\n self.client.api.tag(self.id, repository, tag=tag, **kwargs)\n\n\nclass ImageCollection(Collection):\n model = Image\n\n def build(self, **kwargs):\n \"\"\"\n Build an image and return it. Similar to the ``docker build``\n command. Either ``path`` or ``fileobj`` must be set.\n\n If you have a tar file for the Docker build context (including a\n Dockerfile) already, pass a readable file-like object to ``fileobj``\n and also pass ``custom_context=True``. If the stream is compressed\n also, set ``encoding`` to the correct value (e.g ``gzip``).\n\n If you want to get the raw output of the build, use the\n :py:meth:`~docker.api.build.BuildApiMixin.build` method in the\n low-level API.\n\n Args:\n path (str): Path to the directory containing the Dockerfile\n fileobj: A file object to use as the Dockerfile. (Or a file-like\n object)\n tag (str): A tag to add to the final image\n quiet (bool): Whether to return the status\n nocache (bool): Don't use the cache when set to ``True``\n rm (bool): Remove intermediate containers. The ``docker build``\n command now defaults to ``--rm=true``, but we have kept the old\n default of `False` to preserve backward compatibility\n stream (bool): *Deprecated for API version > 1.8 (always True)*.\n Return a blocking generator you can iterate over to retrieve\n build output as it happens\n timeout (int): HTTP timeout\n custom_context (bool): Optional if using ``fileobj``\n encoding (str): The encoding for a stream. Set to ``gzip`` for\n compressing\n pull (bool): Downloads any updates to the FROM image in Dockerfiles\n forcerm (bool): Always remove intermediate containers, even after\n unsuccessful builds\n dockerfile (str): path within the build context to the Dockerfile\n buildargs (dict): A dictionary of build arguments\n container_limits (dict): A dictionary of limits applied to each\n container created by the build process. Valid keys:\n\n - memory (int): set memory limit for build\n - memswap (int): Total memory (memory + swap), -1 to disable\n swap\n - cpushares (int): CPU shares (relative weight)\n - cpusetcpus (str): CPUs in which to allow execution, e.g.,\n ``\"0-3\"``, ``\"0,1\"``\n decode (bool): If set to ``True``, the returned stream will be\n decoded into dicts on the fly. 
Default ``False``.\n cache_from (list): A list of images used for build cache\n resolution.\n target (str): Name of the build-stage to build in a multi-stage\n Dockerfile.\n\n Returns:\n (:py:class:`Image`): The built image.\n\n Raises:\n :py:class:`docker.errors.BuildError`\n If there is an error during the build.\n :py:class:`docker.errors.APIError`\n If the server returns any other error.\n ``TypeError``\n If neither ``path`` nor ``fileobj`` is specified.\n \"\"\"\n resp = self.client.api.build(**kwargs)\n if isinstance(resp, six.string_types):\n return self.get(resp)\n last_event = None\n for chunk in json_stream(resp):\n if 'error' in chunk:\n raise BuildError(chunk['error'])\n if 'stream' in chunk:\n match = re.search(\n r'(Successfully built |sha256:)([0-9a-f]+)',\n chunk['stream']\n )\n if match:\n image_id = match.group(2)\n return self.get(image_id)\n last_event = chunk\n\n raise BuildError(last_event or 'Unknown')\n\n def get(self, name):\n \"\"\"\n Gets an image.\n\n Args:\n name (str): The name of the image.\n\n Returns:\n (:py:class:`Image`): The image.\n\n Raises:\n :py:class:`docker.errors.ImageNotFound`\n If the image does not exist.\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n return self.prepare_model(self.client.api.inspect_image(name))\n\n def list(self, name=None, all=False, filters=None):\n \"\"\"\n List images on the server.\n\n Args:\n name (str): Only show images belonging to the repository ``name``\n all (bool): Show intermediate image layers. By default, these are\n filtered out.\n filters (dict): Filters to be processed on the image list.\n Available filters:\n - ``dangling`` (bool)\n - ``label`` (str): format either ``key`` or ``key=value``\n\n Returns:\n (list of :py:class:`Image`): The images.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n resp = self.client.api.images(name=name, all=all, filters=filters)\n return [self.prepare_model(r) for r in resp]\n\n def load(self, data):\n \"\"\"\n Load an image that was previously saved using\n :py:meth:`~docker.models.images.Image.save` (or ``docker save``).\n Similar to ``docker load``.\n\n Args:\n data (binary): Image data to be loaded.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n return self.client.api.load_image(data)\n\n def pull(self, name, tag=None, **kwargs):\n \"\"\"\n Pull an image of the given name and return it. Similar to the\n ``docker pull`` command.\n\n If you want to get the raw pull output, use the\n :py:meth:`~docker.api.image.ImageApiMixin.pull` method in the\n low-level API.\n\n Args:\n repository (str): The repository to pull\n tag (str): The tag to pull\n insecure_registry (bool): Use an insecure registry\n auth_config (dict): Override the credentials that\n :py:meth:`~docker.client.DockerClient.login` has set for\n this request. 
``auth_config`` should contain the ``username``\n and ``password`` keys to be valid.\n\n Returns:\n (:py:class:`Image`): The image that has been pulled.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n\n Example:\n\n >>> image = client.images.pull('busybox')\n \"\"\"\n self.client.api.pull(name, tag=tag, **kwargs)\n return self.get('{0}:{1}'.format(name, tag) if tag else name)\n\n def push(self, repository, tag=None, **kwargs):\n return self.client.api.push(repository, tag=tag, **kwargs)\n push.__doc__ = APIClient.push.__doc__\n\n def remove(self, *args, **kwargs):\n self.client.api.remove_image(*args, **kwargs)\n remove.__doc__ = APIClient.remove_image.__doc__\n\n def search(self, *args, **kwargs):\n return self.client.api.search(*args, **kwargs)\n search.__doc__ = APIClient.search.__doc__\n\n def prune(self, filters=None):\n return self.client.api.prune_images(filters=filters)\n prune.__doc__ = APIClient.prune_images.__doc__\n", "path": "docker/models/images.py"}]} | 3,615 | 165 |
gh_patches_debug_24353 | rasdani/github-patches | git_diff | armada-ths__ais-766 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Disable debug in production
https://app.asana.com/0/1204005227675382/1204038385197069/f
Currently the Django debug flag is true in the production environment, leading to security issues.
</issue>
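The fix below pins `DEBUG = False` in the production settings module. A common alternative, shown here only as a sketch and not as the project's actual code, is to derive the flag from an environment variable so it can never default to on in production; `DJANGO_DEBUG` is an assumed variable name:

```python
# Sketch only (not ais code): default DEBUG to off and require an explicit opt-in.
import os

DEBUG = os.environ.get("DJANGO_DEBUG", "").lower() in ("1", "true", "yes")
# Equivalent hard-coded form for a dedicated production settings file:
# DEBUG = False
```

Either way, leaving `DEBUG` truthy in production is a security problem because Django's debug pages expose settings and stack traces to visitors.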
<code>
[start of ais/common/settings.py]
1 """
2 This is the settings file containing settings common to both the
3 development and production environments.
4
5 For more information on this file, see
6 https://docs.djangoproject.com/en/1.8/topics/settings/
7
8 For the full list of settings and their values, see
9 https://docs.djangoproject.com/en/1.8/ref/settings/
10 """
11
12 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
13 import os
14 from os import path
15
16 BASE_DIR = path.join(path.dirname(path.abspath(__file__)), '../../')
17
18 CRISPY_TEMPLATE_PACK = 'bootstrap3'
19
20 INSTALLED_APPS = (
21 'django.contrib.admin',
22 'django.contrib.auth',
23 'django.contrib.contenttypes',
24 'django.contrib.sessions',
25 'django.contrib.messages',
26 'django.contrib.staticfiles',
27 'django.contrib.gis',
28 'corsheaders',
29 'webpack_loader',
30 'polymorphic',
31 'events',
32 'companies',
33 'fair',
34 'people',
35 'locations',
36 'recruitment',
37 'api',
38 'news',
39 'orders',
40 'unirel',
41 'crispy_forms',
42 'exhibitors',
43 'django.contrib.humanize',
44 'banquet',
45 'register',
46 'matching',
47 'student_profiles',
48 'transportation',
49 'accounting',
50 'dynamic_formsets',
51 'journal',
52 'markupfield',
53 'testpage',
54 )
55
56 MIDDLEWARE = [
57 'corsheaders.middleware.CorsMiddleware',
58 'django.contrib.sessions.middleware.SessionMiddleware',
59 'django.middleware.common.CommonMiddleware',
60 'django.middleware.csrf.CsrfViewMiddleware',
61 'django.middleware.gzip.GZipMiddleware',
62 'django.contrib.auth.middleware.AuthenticationMiddleware',
63 'django.contrib.messages.middleware.MessageMiddleware',
64 'django.middleware.clickjacking.XFrameOptionsMiddleware',
65 'django.middleware.security.SecurityMiddleware',
66 'recruitment.middleware.LoginRequiredMiddleware'
67 ]
68
69 USE_ETAGS = True
70
71 AUTHENTICATION_BACKENDS = (
72 'django.contrib.auth.backends.ModelBackend',
73 )
74
75 TEMPLATES = [
76 {
77 'BACKEND': 'django.template.backends.django.DjangoTemplates',
78 'DIRS': [path.join(BASE_DIR, "templates")],
79 'APP_DIRS': True,
80 'OPTIONS': {
81 'context_processors': [
82 'django.template.context_processors.debug',
83 'django.template.context_processors.request',
84 'django.contrib.auth.context_processors.auth',
85 'django.contrib.messages.context_processors.messages',
86 ],
87 },
88 },
89 ]
90
91 WEBPACK_LOADER = {
92 'DEFAULT': {
93 'BUNDLE_DIR_NAME': 'bundles/',
94 'STATS_FILE': path.join(BASE_DIR, 'webpack-stats.js')
95 }
96 }
97
98 WSGI_APPLICATION = 'ais.wsgi.application'
99
100 STATIC_ROOT = path.join(BASE_DIR, "static")
101 STATIC_URL = '/static/'
102 STATICFILES_DIRS = (
103 path.join(BASE_DIR, "ais_static"),
104 )
105
106 ADMIN_MEDIA_PREFIX = '/static/admin/'
107 MEDIA_ROOT = path.abspath(path.join(BASE_DIR, '..', 'media'))
108
109 MEDIA_URL = '/media/'
110
111 # Internationalization
112 # https://docs.djangoproject.com/en/1.8/topics/i18n/
113 LANGUAGE_CODE = 'en-us'
114 TIME_ZONE = 'Europe/Stockholm'
115 USE_I18N = True
116 LANGUAGE_CODE = 'en-us'
117 USE_L10N = False
118 DATE_FORMAT = "M j, Y"
119 DATETIME_FORMAT = "M j, Y, H:i"
120
[end of ais/common/settings.py]
[start of ais/production/settings.py]
1 """
2 This is the settings file to be used in a production environment. It's
3 more secure, more robust and more performant than the development setup
4 and also configures AIS to talk to external services.
5 """
6
7 import os
8 from ais.common.settings import *
9
10 SECRET_KEY = os.environ.get('SECRET_KEY')
11 # This is important so other people can't set their own domains
12 # to point to AIS (which would be a security concern).
13 ALLOWED_HOSTS = ['.armada.nu', 'localhost', 'armada.nu']
14
15 DEBUG = True
16
17 # The URL scheme is slightly different in a production environment
18 # since we need to accomodate the KTH OpenID Connect integration.
19 ROOT_URLCONF = 'ais.production.urls'
20
21 # Use KTH OpenID Connect for authentication
22 INSTALLED_APPS += ('kth_login','raven.contrib.django.raven_compat',)
23
24 # Use a full-fledged database instead of SQLite.
25 DATABASES = {
26 'default': {
27 'ENGINE': 'django.contrib.gis.db.backends.postgis',
28 'NAME': os.environ.get('DB_NAME', 'ais_dev'),
29 'USER': os.environ.get('DB_USERNAME', 'ais_dev'),
30 'PASSWORD': os.environ.get('DB_PASSWORD', 'ais_dev'),
31 'HOST': os.environ.get('DB_HOST', '127.0.0.1'),
32 'PORT': os.environ.get('DB_PORT', '5432'),
33 }
34 }
35
36 # SENTRY
37 RAVEN_CONFIG = {
38 'dsn': 'https://%s:%[email protected]/%s' % (os.environ.get('SENTRY_USERNAME'), os.environ.get('SENTRY_PASSWORD'), os.environ.get('SENTRY_APPID')),
39 'processors': ('raven.processors.Processor',)
40 }
41
42 CACHES = {
43 'default': {
44 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'
45 }
46 }
47
48 LOGGING = {
49 'version': 1,
50 'disable_existing_loggers': True,
51 'formatters': {
52 'verbose': {
53 'format': '%(levelname)s %(asctime)s %(module)s '
54 '%(process)d %(thread)d %(message)s'
55 },
56 },
57 'handlers': {
58 'sentry': {
59 'level': 'ERROR', # To capture more than ERROR, change to WARNING, INFO, etc.
60 'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
61 'tags': {'custom-tag': 'x'},
62 },
63 'console': {
64 'level': 'DEBUG',
65 'class': 'logging.StreamHandler',
66 'formatter': 'verbose'
67 }
68 },
69 'loggers': {
70 'root': {
71 'level': 'WARNING',
72 'handlers': ['sentry'],
73 },
74 'django.db.backends': {
75 'level': 'ERROR',
76 'handlers': ['console'],
77 'propagate': False,
78 },
79 'raven': {
80 'level': 'DEBUG',
81 'handlers': ['console'],
82 'propagate': False,
83 },
84 'sentry.errors': {
85 'level': 'DEBUG',
86 'handlers': ['console'],
87 'propagate': False,
88 },
89 },
90 }
91
92 # The system sends out system-related emails to these addresses.
93 ADMINS = MANAGERS = (
94 ('System', '[email protected]'),
95 )
96
97 # This is for AUTHLIB package for interacting with KTH OpenID Connect
98 # APPLICATION_ID is given from the 'secrets.py' file.
99 # CLIENT_SECRET is given from the 'secrets.py' file.
100 AUTHLIB_OAUTH_CLIENTS = {
101 'kth': {
102 'client_id': os.environ.get('APPLICATION_ID'),
103 'client_secret': os.environ.get('CLIENT_SECRET'),
104 'api_base_url': 'https://login.ug.kth.se/adfs/oauth2/',
105 }
106 }
107 LOGOUT_REDIRECT_URL = '/'
108
[end of ais/production/settings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ais/common/settings.py b/ais/common/settings.py
--- a/ais/common/settings.py
+++ b/ais/common/settings.py
@@ -18,6 +18,7 @@
CRISPY_TEMPLATE_PACK = 'bootstrap3'
INSTALLED_APPS = (
+ 'whitenoise.runserver_nostatic',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
@@ -55,6 +56,8 @@
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
+ "django.middleware.security.SecurityMiddleware",
+ "whitenoise.middleware.WhiteNoiseMiddleware",
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
diff --git a/ais/production/settings.py b/ais/production/settings.py
--- a/ais/production/settings.py
+++ b/ais/production/settings.py
@@ -12,7 +12,7 @@
# to point to AIS (which would be a security concern).
ALLOWED_HOSTS = ['.armada.nu', 'localhost', 'armada.nu']
-DEBUG = True
+DEBUG = False
# The URL scheme is slightly different in a production environment
# since we need to accomodate the KTH OpenID Connect integration.
| {"golden_diff": "diff --git a/ais/common/settings.py b/ais/common/settings.py\n--- a/ais/common/settings.py\n+++ b/ais/common/settings.py\n@@ -18,6 +18,7 @@\n CRISPY_TEMPLATE_PACK = 'bootstrap3'\n \n INSTALLED_APPS = (\n+ 'whitenoise.runserver_nostatic',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n@@ -55,6 +56,8 @@\n \n MIDDLEWARE = [\n 'corsheaders.middleware.CorsMiddleware',\n+ \"django.middleware.security.SecurityMiddleware\",\n+ \"whitenoise.middleware.WhiteNoiseMiddleware\",\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\ndiff --git a/ais/production/settings.py b/ais/production/settings.py\n--- a/ais/production/settings.py\n+++ b/ais/production/settings.py\n@@ -12,7 +12,7 @@\n # to point to AIS (which would be a security concern).\n ALLOWED_HOSTS = ['.armada.nu', 'localhost', 'armada.nu']\n \n-DEBUG = True\n+DEBUG = False\n \n # The URL scheme is slightly different in a production environment\n # since we need to accomodate the KTH OpenID Connect integration.\n", "issue": "Disable debug in production\nhttps://app.asana.com/0/1204005227675382/1204038385197069/f\r\n\r\nCurrently the Django debug flag is true in the production environment, leading to security issues.\n", "before_files": [{"content": "\"\"\"\nThis is the settings file containing settings common to both the\ndevelopment and production environments.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.8/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.8/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nfrom os import path\n\nBASE_DIR = path.join(path.dirname(path.abspath(__file__)), '../../')\n\nCRISPY_TEMPLATE_PACK = 'bootstrap3'\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.gis',\n 'corsheaders',\n 'webpack_loader',\n 'polymorphic',\n 'events',\n 'companies',\n 'fair',\n 'people',\n 'locations',\n 'recruitment',\n 'api',\n 'news',\n 'orders',\n 'unirel',\n 'crispy_forms',\n 'exhibitors',\n 'django.contrib.humanize',\n 'banquet',\n 'register',\n 'matching',\n 'student_profiles',\n 'transportation',\n 'accounting',\n 'dynamic_formsets',\n 'journal',\n 'markupfield',\n 'testpage',\n)\n\nMIDDLEWARE = [\n 'corsheaders.middleware.CorsMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.middleware.gzip.GZipMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'recruitment.middleware.LoginRequiredMiddleware'\n]\n\nUSE_ETAGS = True\n\nAUTHENTICATION_BACKENDS = (\n 'django.contrib.auth.backends.ModelBackend',\n)\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [path.join(BASE_DIR, \"templates\")],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 
'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWEBPACK_LOADER = {\n 'DEFAULT': {\n 'BUNDLE_DIR_NAME': 'bundles/',\n 'STATS_FILE': path.join(BASE_DIR, 'webpack-stats.js')\n }\n}\n\nWSGI_APPLICATION = 'ais.wsgi.application'\n\nSTATIC_ROOT = path.join(BASE_DIR, \"static\")\nSTATIC_URL = '/static/'\nSTATICFILES_DIRS = (\n path.join(BASE_DIR, \"ais_static\"),\n)\n\nADMIN_MEDIA_PREFIX = '/static/admin/'\nMEDIA_ROOT = path.abspath(path.join(BASE_DIR, '..', 'media'))\n\nMEDIA_URL = '/media/'\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.8/topics/i18n/\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'Europe/Stockholm'\nUSE_I18N = True\nLANGUAGE_CODE = 'en-us'\nUSE_L10N = False\nDATE_FORMAT = \"M j, Y\"\nDATETIME_FORMAT = \"M j, Y, H:i\"\n", "path": "ais/common/settings.py"}, {"content": "\"\"\"\nThis is the settings file to be used in a production environment. It's\nmore secure, more robust and more performant than the development setup\nand also configures AIS to talk to external services.\n\"\"\"\n\nimport os\nfrom ais.common.settings import *\n\nSECRET_KEY = os.environ.get('SECRET_KEY')\n# This is important so other people can't set their own domains\n# to point to AIS (which would be a security concern).\nALLOWED_HOSTS = ['.armada.nu', 'localhost', 'armada.nu']\n\nDEBUG = True\n\n# The URL scheme is slightly different in a production environment\n# since we need to accomodate the KTH OpenID Connect integration.\nROOT_URLCONF = 'ais.production.urls'\n\n# Use KTH OpenID Connect for authentication\nINSTALLED_APPS += ('kth_login','raven.contrib.django.raven_compat',)\n\n# Use a full-fledged database instead of SQLite.\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.contrib.gis.db.backends.postgis',\n 'NAME': os.environ.get('DB_NAME', 'ais_dev'),\n 'USER': os.environ.get('DB_USERNAME', 'ais_dev'),\n 'PASSWORD': os.environ.get('DB_PASSWORD', 'ais_dev'),\n 'HOST': os.environ.get('DB_HOST', '127.0.0.1'),\n 'PORT': os.environ.get('DB_PORT', '5432'),\n }\n}\n\n# SENTRY\nRAVEN_CONFIG = {\n 'dsn': 'https://%s:%[email protected]/%s' % (os.environ.get('SENTRY_USERNAME'), os.environ.get('SENTRY_PASSWORD'), os.environ.get('SENTRY_APPID')),\n 'processors': ('raven.processors.Processor',)\n}\n\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'\n }\n}\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': True,\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s '\n '%(process)d %(thread)d %(message)s'\n },\n },\n 'handlers': {\n 'sentry': {\n 'level': 'ERROR', # To capture more than ERROR, change to WARNING, INFO, etc.\n 'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',\n 'tags': {'custom-tag': 'x'},\n },\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'verbose'\n }\n },\n 'loggers': {\n 'root': {\n 'level': 'WARNING',\n 'handlers': ['sentry'],\n },\n 'django.db.backends': {\n 'level': 'ERROR',\n 'handlers': ['console'],\n 'propagate': False,\n },\n 'raven': {\n 'level': 'DEBUG',\n 'handlers': ['console'],\n 'propagate': False,\n },\n 'sentry.errors': {\n 'level': 'DEBUG',\n 'handlers': ['console'],\n 'propagate': False,\n },\n },\n}\n\n# The system sends out system-related emails to these addresses.\nADMINS = MANAGERS = (\n ('System', '[email protected]'),\n)\n\n# This is for AUTHLIB package for interacting with KTH OpenID Connect\n# APPLICATION_ID is given from the 'secrets.py' file.\n# CLIENT_SECRET is given from the 'secrets.py' 
file.\nAUTHLIB_OAUTH_CLIENTS = {\n 'kth': {\n 'client_id': os.environ.get('APPLICATION_ID'),\n 'client_secret': os.environ.get('CLIENT_SECRET'),\n 'api_base_url': 'https://login.ug.kth.se/adfs/oauth2/',\n }\n}\nLOGOUT_REDIRECT_URL = '/'\n", "path": "ais/production/settings.py"}]} | 2,634 | 287 |
gh_patches_debug_19296 | rasdani/github-patches | git_diff | scikit-hep__pyhf-1673 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Pass `Accept` header in `contrib.utils.download`
I'm copying a comment here that I made in the [HEPData Zulip chat](https://hepdata.zulipchat.com/#narrow/stream/226203-pyhf/topic/DOIs/near/213610306) on 16th October 2020.
Regarding the issue (HEPData/hepdata#162) to mint DOIs for all local resource files attached to a submission, if we do eventually get around to addressing it, we would probably redirect the DOI to a landing page for the resource file, rather than to the resource file itself (e.g. the pyhf tarball). This would follow the DataCite [Best Practices for DOI Landing Pages](https://support.datacite.org/docs/landing-pages), e.g. "[DOIs should resolve to a landing page, not directly to the content](https://support.datacite.org/docs/landing-pages#dois-should-resolve-to-a-landing-page-not-directly-to-the-content)", which I'm currently breaking for the two manually minted DOIs. In the issue (HEPdata/hepdata#162) I mentioned the possibility of using [DataCite Content Negotiation](https://support.datacite.org/docs/datacite-content-resolver) to redirect to the resource file itself, but the linked page now says "Custom content types are no longer supported since January 1st, 2020". I thought maybe content negotiation could be used to return the `.tar.gz` file directly, but the intended purpose is to retrieve DOI metadata in different formats, not to provide the content itself. In anticipation of possible future changes, I'd recommend that you use the URL directly rather than the DOI in pyhf download scripts and documentation (e.g. revert #1109).
</issue>
<code>
[start of src/pyhf/contrib/utils.py]
1 """Helper utilities for common tasks."""
2
3 from urllib.parse import urlparse
4 import tarfile
5 from io import BytesIO
6 import logging
7 from pyhf import exceptions
8
9 log = logging.getLogger(__name__)
10
11 __all__ = ["download"]
12
13
14 def __dir__():
15 return __all__
16
17
18 try:
19 import requests
20
21 def download(archive_url, output_directory, force=False, compress=False):
22 """
23 Download the patchset archive from the remote URL and extract it in a
24 directory at the path given.
25
26 Example:
27
28 >>> from pyhf.contrib.utils import download
29 >>> download("https://doi.org/10.17182/hepdata.90607.v3/r3", "1Lbb-likelihoods")
30 >>> import os
31 >>> sorted(os.listdir("1Lbb-likelihoods"))
32 ['BkgOnly.json', 'README.md', 'patchset.json']
33 >>> download("https://doi.org/10.17182/hepdata.90607.v3/r3", "1Lbb-likelihoods.tar.gz", compress=True)
34 >>> import glob
35 >>> glob.glob("1Lbb-likelihoods.tar.gz")
36 ['1Lbb-likelihoods.tar.gz']
37
38 Args:
39 archive_url (:obj:`str`): The URL of the :class:`~pyhf.patchset.PatchSet` archive to download.
40 output_directory (:obj:`str`): Name of the directory to unpack the archive into.
41 force (:obj:`bool`): Force download from non-approved host. Default is ``False``.
42 compress (:obj:`bool`): Keep the archive in a compressed ``tar.gz`` form. Default is ``False``.
43
44 Raises:
45 :class:`~pyhf.exceptions.InvalidArchiveHost`: if the provided archive host name is not known to be valid
46 """
47 if not force:
48 valid_hosts = ["www.hepdata.net", "doi.org"]
49 netloc = urlparse(archive_url).netloc
50 if netloc not in valid_hosts:
51 raise exceptions.InvalidArchiveHost(
52 f"{netloc} is not an approved archive host: {', '.join(str(host) for host in valid_hosts)}\n"
53 + "To download an archive from this host use the --force option."
54 )
55
56 with requests.get(archive_url) as response:
57 if compress:
58 with open(output_directory, "wb") as archive:
59 archive.write(response.content)
60 else:
61 with tarfile.open(
62 mode="r|gz", fileobj=BytesIO(response.content)
63 ) as archive:
64 archive.extractall(output_directory)
65
66
67 except ModuleNotFoundError:
68 log.error(
69 "\nInstallation of the contrib extra is required to use pyhf.contrib.utils.download"
70 + "\nPlease install with: python -m pip install pyhf[contrib]\n",
71 exc_info=True,
72 )
73
[end of src/pyhf/contrib/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/pyhf/contrib/utils.py b/src/pyhf/contrib/utils.py
--- a/src/pyhf/contrib/utils.py
+++ b/src/pyhf/contrib/utils.py
@@ -53,7 +53,18 @@
+ "To download an archive from this host use the --force option."
)
- with requests.get(archive_url) as response:
+ # c.f. https://github.com/scikit-hep/pyhf/issues/1491
+ # > Use content negotiation at the landing page for the resource that
+ # > the DOI resolves to. DataCite content negotiation is forwarding all
+ # > requests with unknown content types to the URL registered in the
+ # > handle system.
+ # c.f. https://blog.datacite.org/changes-to-doi-content-negotiation/
+ # The HEPData landing page for the resource file can check if the Accept
+ # request HTTP header matches the content type of the resource file and
+ # return the content directly if so.
+ with requests.get(
+ archive_url, headers={"Accept": "application/x-tar"}
+ ) as response:
if compress:
with open(output_directory, "wb") as archive:
archive.write(response.content)
revert #1109).\n", "before_files": [{"content": "\"\"\"Helper utilities for common tasks.\"\"\"\n\nfrom urllib.parse import urlparse\nimport tarfile\nfrom io import BytesIO\nimport logging\nfrom pyhf import exceptions\n\nlog = logging.getLogger(__name__)\n\n__all__ = [\"download\"]\n\n\ndef __dir__():\n return __all__\n\n\ntry:\n import requests\n\n def download(archive_url, output_directory, force=False, compress=False):\n \"\"\"\n Download the patchset archive from the remote URL and extract it in a\n directory at the path given.\n\n Example:\n\n >>> from pyhf.contrib.utils import download\n >>> download(\"https://doi.org/10.17182/hepdata.90607.v3/r3\", \"1Lbb-likelihoods\")\n >>> import os\n >>> sorted(os.listdir(\"1Lbb-likelihoods\"))\n ['BkgOnly.json', 'README.md', 'patchset.json']\n >>> download(\"https://doi.org/10.17182/hepdata.90607.v3/r3\", \"1Lbb-likelihoods.tar.gz\", compress=True)\n >>> import glob\n >>> glob.glob(\"1Lbb-likelihoods.tar.gz\")\n ['1Lbb-likelihoods.tar.gz']\n\n Args:\n archive_url (:obj:`str`): The URL of the :class:`~pyhf.patchset.PatchSet` archive to download.\n output_directory (:obj:`str`): Name of the directory to unpack the archive into.\n force (:obj:`bool`): Force download from non-approved host. Default is ``False``.\n compress (:obj:`bool`): Keep the archive in a compressed ``tar.gz`` form. Default is ``False``.\n\n Raises:\n :class:`~pyhf.exceptions.InvalidArchiveHost`: if the provided archive host name is not known to be valid\n \"\"\"\n if not force:\n valid_hosts = [\"www.hepdata.net\", \"doi.org\"]\n netloc = urlparse(archive_url).netloc\n if netloc not in valid_hosts:\n raise exceptions.InvalidArchiveHost(\n f\"{netloc} is not an approved archive host: {', '.join(str(host) for host in valid_hosts)}\\n\"\n + \"To download an archive from this host use the --force option.\"\n )\n\n with requests.get(archive_url) as response:\n if compress:\n with open(output_directory, \"wb\") as archive:\n archive.write(response.content)\n else:\n with tarfile.open(\n mode=\"r|gz\", fileobj=BytesIO(response.content)\n ) as archive:\n archive.extractall(output_directory)\n\n\nexcept ModuleNotFoundError:\n log.error(\n \"\\nInstallation of the contrib extra is required to use pyhf.contrib.utils.download\"\n + \"\\nPlease install with: python -m pip install pyhf[contrib]\\n\",\n exc_info=True,\n )\n", "path": "src/pyhf/contrib/utils.py"}]} | 1,690 | 280 |
gh_patches_debug_23232 | rasdani/github-patches | git_diff | matrix-org__synapse-8567 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Increase in errors due to no active logging span
Starting on Oct 1 (which corresponds with a re-deploy of matrix.org), we've been seeing a lot more of the following error:
> There was no active span when trying to inject the span into a byte dict. Did you forget to start one or did a context slip?
https://sentry.matrix.org/sentry/synapse-matrixorg/issues/15795/
</issue>
<code>
[start of synapse/metrics/background_process_metrics.py]
1 # -*- coding: utf-8 -*-
2 # Copyright 2018 New Vector Ltd
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 import inspect
17 import logging
18 import threading
19 from functools import wraps
20 from typing import TYPE_CHECKING, Dict, Optional, Set
21
22 from prometheus_client.core import REGISTRY, Counter, Gauge
23
24 from twisted.internet import defer
25
26 from synapse.logging.context import LoggingContext, PreserveLoggingContext
27
28 if TYPE_CHECKING:
29 import resource
30
31
32 logger = logging.getLogger(__name__)
33
34
35 _background_process_start_count = Counter(
36 "synapse_background_process_start_count",
37 "Number of background processes started",
38 ["name"],
39 )
40
41 _background_process_in_flight_count = Gauge(
42 "synapse_background_process_in_flight_count",
43 "Number of background processes in flight",
44 labelnames=["name"],
45 )
46
47 # we set registry=None in all of these to stop them getting registered with
48 # the default registry. Instead we collect them all via the CustomCollector,
49 # which ensures that we can update them before they are collected.
50 #
51 _background_process_ru_utime = Counter(
52 "synapse_background_process_ru_utime_seconds",
53 "User CPU time used by background processes, in seconds",
54 ["name"],
55 registry=None,
56 )
57
58 _background_process_ru_stime = Counter(
59 "synapse_background_process_ru_stime_seconds",
60 "System CPU time used by background processes, in seconds",
61 ["name"],
62 registry=None,
63 )
64
65 _background_process_db_txn_count = Counter(
66 "synapse_background_process_db_txn_count",
67 "Number of database transactions done by background processes",
68 ["name"],
69 registry=None,
70 )
71
72 _background_process_db_txn_duration = Counter(
73 "synapse_background_process_db_txn_duration_seconds",
74 (
75 "Seconds spent by background processes waiting for database "
76 "transactions, excluding scheduling time"
77 ),
78 ["name"],
79 registry=None,
80 )
81
82 _background_process_db_sched_duration = Counter(
83 "synapse_background_process_db_sched_duration_seconds",
84 "Seconds spent by background processes waiting for database connections",
85 ["name"],
86 registry=None,
87 )
88
89 # map from description to a counter, so that we can name our logcontexts
90 # incrementally. (It actually duplicates _background_process_start_count, but
91 # it's much simpler to do so than to try to combine them.)
92 _background_process_counts = {} # type: Dict[str, int]
93
94 # Set of all running background processes that became active active since the
95 # last time metrics were scraped (i.e. background processes that performed some
96 # work since the last scrape.)
97 #
98 # We do it like this to handle the case where we have a large number of
99 # background processes stacking up behind a lock or linearizer, where we then
100 # only need to iterate over and update metrics for the process that have
101 # actually been active and can ignore the idle ones.
102 _background_processes_active_since_last_scrape = set() # type: Set[_BackgroundProcess]
103
104 # A lock that covers the above set and dict
105 _bg_metrics_lock = threading.Lock()
106
107
108 class _Collector:
109 """A custom metrics collector for the background process metrics.
110
111 Ensures that all of the metrics are up-to-date with any in-flight processes
112 before they are returned.
113 """
114
115 def collect(self):
116 global _background_processes_active_since_last_scrape
117
118 # We swap out the _background_processes set with an empty one so that
119 # we can safely iterate over the set without holding the lock.
120 with _bg_metrics_lock:
121 _background_processes_copy = _background_processes_active_since_last_scrape
122 _background_processes_active_since_last_scrape = set()
123
124 for process in _background_processes_copy:
125 process.update_metrics()
126
127 # now we need to run collect() over each of the static Counters, and
128 # yield each metric they return.
129 for m in (
130 _background_process_ru_utime,
131 _background_process_ru_stime,
132 _background_process_db_txn_count,
133 _background_process_db_txn_duration,
134 _background_process_db_sched_duration,
135 ):
136 for r in m.collect():
137 yield r
138
139
140 REGISTRY.register(_Collector())
141
142
143 class _BackgroundProcess:
144 def __init__(self, desc, ctx):
145 self.desc = desc
146 self._context = ctx
147 self._reported_stats = None
148
149 def update_metrics(self):
150 """Updates the metrics with values from this process."""
151 new_stats = self._context.get_resource_usage()
152 if self._reported_stats is None:
153 diff = new_stats
154 else:
155 diff = new_stats - self._reported_stats
156 self._reported_stats = new_stats
157
158 _background_process_ru_utime.labels(self.desc).inc(diff.ru_utime)
159 _background_process_ru_stime.labels(self.desc).inc(diff.ru_stime)
160 _background_process_db_txn_count.labels(self.desc).inc(diff.db_txn_count)
161 _background_process_db_txn_duration.labels(self.desc).inc(
162 diff.db_txn_duration_sec
163 )
164 _background_process_db_sched_duration.labels(self.desc).inc(
165 diff.db_sched_duration_sec
166 )
167
168
169 def run_as_background_process(desc: str, func, *args, **kwargs):
170 """Run the given function in its own logcontext, with resource metrics
171
172 This should be used to wrap processes which are fired off to run in the
173 background, instead of being associated with a particular request.
174
175 It returns a Deferred which completes when the function completes, but it doesn't
176 follow the synapse logcontext rules, which makes it appropriate for passing to
177 clock.looping_call and friends (or for firing-and-forgetting in the middle of a
178 normal synapse async function).
179
180 Args:
181 desc: a description for this background process type
182 func: a function, which may return a Deferred or a coroutine
183 args: positional args for func
184 kwargs: keyword args for func
185
186 Returns: Deferred which returns the result of func, but note that it does not
187 follow the synapse logcontext rules.
188 """
189
190 async def run():
191 with _bg_metrics_lock:
192 count = _background_process_counts.get(desc, 0)
193 _background_process_counts[desc] = count + 1
194
195 _background_process_start_count.labels(desc).inc()
196 _background_process_in_flight_count.labels(desc).inc()
197
198 with BackgroundProcessLoggingContext(desc) as context:
199 context.request = "%s-%i" % (desc, count)
200
201 try:
202 result = func(*args, **kwargs)
203
204 if inspect.isawaitable(result):
205 result = await result
206
207 return result
208 except Exception:
209 logger.exception(
210 "Background process '%s' threw an exception", desc,
211 )
212 finally:
213 _background_process_in_flight_count.labels(desc).dec()
214
215 with PreserveLoggingContext():
216 # Note that we return a Deferred here so that it can be used in a
217 # looping_call and other places that expect a Deferred.
218 return defer.ensureDeferred(run())
219
220
221 def wrap_as_background_process(desc):
222 """Decorator that wraps a function that gets called as a background
223 process.
224
225 Equivalent of calling the function with `run_as_background_process`
226 """
227
228 def wrap_as_background_process_inner(func):
229 @wraps(func)
230 def wrap_as_background_process_inner_2(*args, **kwargs):
231 return run_as_background_process(desc, func, *args, **kwargs)
232
233 return wrap_as_background_process_inner_2
234
235 return wrap_as_background_process_inner
236
237
238 class BackgroundProcessLoggingContext(LoggingContext):
239 """A logging context that tracks in flight metrics for background
240 processes.
241 """
242
243 __slots__ = ["_proc"]
244
245 def __init__(self, name: str):
246 super().__init__(name)
247
248 self._proc = _BackgroundProcess(name, self)
249
250 def start(self, rusage: "Optional[resource._RUsage]"):
251 """Log context has started running (again).
252 """
253
254 super().start(rusage)
255
256 # We've become active again so we make sure we're in the list of active
257 # procs. (Note that "start" here means we've become active, as opposed
258 # to starting for the first time.)
259 with _bg_metrics_lock:
260 _background_processes_active_since_last_scrape.add(self._proc)
261
262 def __exit__(self, type, value, traceback) -> None:
263 """Log context has finished.
264 """
265
266 super().__exit__(type, value, traceback)
267
268 # The background process has finished. We explictly remove and manually
269 # update the metrics here so that if nothing is scraping metrics the set
270 # doesn't infinitely grow.
271 with _bg_metrics_lock:
272 _background_processes_active_since_last_scrape.discard(self._proc)
273
274 self._proc.update_metrics()
275
[end of synapse/metrics/background_process_metrics.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py
--- a/synapse/metrics/background_process_metrics.py
+++ b/synapse/metrics/background_process_metrics.py
@@ -24,6 +24,7 @@
from twisted.internet import defer
from synapse.logging.context import LoggingContext, PreserveLoggingContext
+from synapse.logging.opentracing import start_active_span
if TYPE_CHECKING:
import resource
@@ -197,14 +198,14 @@
with BackgroundProcessLoggingContext(desc) as context:
context.request = "%s-%i" % (desc, count)
-
try:
- result = func(*args, **kwargs)
+ with start_active_span(desc, tags={"request_id": context.request}):
+ result = func(*args, **kwargs)
- if inspect.isawaitable(result):
- result = await result
+ if inspect.isawaitable(result):
+ result = await result
- return result
+ return result
except Exception:
logger.exception(
"Background process '%s' threw an exception", desc,
| {"golden_diff": "diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py\n--- a/synapse/metrics/background_process_metrics.py\n+++ b/synapse/metrics/background_process_metrics.py\n@@ -24,6 +24,7 @@\n from twisted.internet import defer\n \n from synapse.logging.context import LoggingContext, PreserveLoggingContext\n+from synapse.logging.opentracing import start_active_span\n \n if TYPE_CHECKING:\n import resource\n@@ -197,14 +198,14 @@\n \n with BackgroundProcessLoggingContext(desc) as context:\n context.request = \"%s-%i\" % (desc, count)\n-\n try:\n- result = func(*args, **kwargs)\n+ with start_active_span(desc, tags={\"request_id\": context.request}):\n+ result = func(*args, **kwargs)\n \n- if inspect.isawaitable(result):\n- result = await result\n+ if inspect.isawaitable(result):\n+ result = await result\n \n- return result\n+ return result\n except Exception:\n logger.exception(\n \"Background process '%s' threw an exception\", desc,\n", "issue": "Increase in errors due to no active logging span\nStarting on Oct 1 (which corresponds with a re-deploy of matrix.org) we've been seeing a lot more of the following error:\r\n\r\n> There was no active span when trying to inject the span into a byte dict. Did you forget to start one or did a context slip?\r\n\r\nhttps://sentry.matrix.org/sentry/synapse-matrixorg/issues/15795/\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2018 New Vector Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport inspect\nimport logging\nimport threading\nfrom functools import wraps\nfrom typing import TYPE_CHECKING, Dict, Optional, Set\n\nfrom prometheus_client.core import REGISTRY, Counter, Gauge\n\nfrom twisted.internet import defer\n\nfrom synapse.logging.context import LoggingContext, PreserveLoggingContext\n\nif TYPE_CHECKING:\n import resource\n\n\nlogger = logging.getLogger(__name__)\n\n\n_background_process_start_count = Counter(\n \"synapse_background_process_start_count\",\n \"Number of background processes started\",\n [\"name\"],\n)\n\n_background_process_in_flight_count = Gauge(\n \"synapse_background_process_in_flight_count\",\n \"Number of background processes in flight\",\n labelnames=[\"name\"],\n)\n\n# we set registry=None in all of these to stop them getting registered with\n# the default registry. 
Instead we collect them all via the CustomCollector,\n# which ensures that we can update them before they are collected.\n#\n_background_process_ru_utime = Counter(\n \"synapse_background_process_ru_utime_seconds\",\n \"User CPU time used by background processes, in seconds\",\n [\"name\"],\n registry=None,\n)\n\n_background_process_ru_stime = Counter(\n \"synapse_background_process_ru_stime_seconds\",\n \"System CPU time used by background processes, in seconds\",\n [\"name\"],\n registry=None,\n)\n\n_background_process_db_txn_count = Counter(\n \"synapse_background_process_db_txn_count\",\n \"Number of database transactions done by background processes\",\n [\"name\"],\n registry=None,\n)\n\n_background_process_db_txn_duration = Counter(\n \"synapse_background_process_db_txn_duration_seconds\",\n (\n \"Seconds spent by background processes waiting for database \"\n \"transactions, excluding scheduling time\"\n ),\n [\"name\"],\n registry=None,\n)\n\n_background_process_db_sched_duration = Counter(\n \"synapse_background_process_db_sched_duration_seconds\",\n \"Seconds spent by background processes waiting for database connections\",\n [\"name\"],\n registry=None,\n)\n\n# map from description to a counter, so that we can name our logcontexts\n# incrementally. (It actually duplicates _background_process_start_count, but\n# it's much simpler to do so than to try to combine them.)\n_background_process_counts = {} # type: Dict[str, int]\n\n# Set of all running background processes that became active active since the\n# last time metrics were scraped (i.e. background processes that performed some\n# work since the last scrape.)\n#\n# We do it like this to handle the case where we have a large number of\n# background processes stacking up behind a lock or linearizer, where we then\n# only need to iterate over and update metrics for the process that have\n# actually been active and can ignore the idle ones.\n_background_processes_active_since_last_scrape = set() # type: Set[_BackgroundProcess]\n\n# A lock that covers the above set and dict\n_bg_metrics_lock = threading.Lock()\n\n\nclass _Collector:\n \"\"\"A custom metrics collector for the background process metrics.\n\n Ensures that all of the metrics are up-to-date with any in-flight processes\n before they are returned.\n \"\"\"\n\n def collect(self):\n global _background_processes_active_since_last_scrape\n\n # We swap out the _background_processes set with an empty one so that\n # we can safely iterate over the set without holding the lock.\n with _bg_metrics_lock:\n _background_processes_copy = _background_processes_active_since_last_scrape\n _background_processes_active_since_last_scrape = set()\n\n for process in _background_processes_copy:\n process.update_metrics()\n\n # now we need to run collect() over each of the static Counters, and\n # yield each metric they return.\n for m in (\n _background_process_ru_utime,\n _background_process_ru_stime,\n _background_process_db_txn_count,\n _background_process_db_txn_duration,\n _background_process_db_sched_duration,\n ):\n for r in m.collect():\n yield r\n\n\nREGISTRY.register(_Collector())\n\n\nclass _BackgroundProcess:\n def __init__(self, desc, ctx):\n self.desc = desc\n self._context = ctx\n self._reported_stats = None\n\n def update_metrics(self):\n \"\"\"Updates the metrics with values from this process.\"\"\"\n new_stats = self._context.get_resource_usage()\n if self._reported_stats is None:\n diff = new_stats\n else:\n diff = new_stats - self._reported_stats\n self._reported_stats = 
new_stats\n\n _background_process_ru_utime.labels(self.desc).inc(diff.ru_utime)\n _background_process_ru_stime.labels(self.desc).inc(diff.ru_stime)\n _background_process_db_txn_count.labels(self.desc).inc(diff.db_txn_count)\n _background_process_db_txn_duration.labels(self.desc).inc(\n diff.db_txn_duration_sec\n )\n _background_process_db_sched_duration.labels(self.desc).inc(\n diff.db_sched_duration_sec\n )\n\n\ndef run_as_background_process(desc: str, func, *args, **kwargs):\n \"\"\"Run the given function in its own logcontext, with resource metrics\n\n This should be used to wrap processes which are fired off to run in the\n background, instead of being associated with a particular request.\n\n It returns a Deferred which completes when the function completes, but it doesn't\n follow the synapse logcontext rules, which makes it appropriate for passing to\n clock.looping_call and friends (or for firing-and-forgetting in the middle of a\n normal synapse async function).\n\n Args:\n desc: a description for this background process type\n func: a function, which may return a Deferred or a coroutine\n args: positional args for func\n kwargs: keyword args for func\n\n Returns: Deferred which returns the result of func, but note that it does not\n follow the synapse logcontext rules.\n \"\"\"\n\n async def run():\n with _bg_metrics_lock:\n count = _background_process_counts.get(desc, 0)\n _background_process_counts[desc] = count + 1\n\n _background_process_start_count.labels(desc).inc()\n _background_process_in_flight_count.labels(desc).inc()\n\n with BackgroundProcessLoggingContext(desc) as context:\n context.request = \"%s-%i\" % (desc, count)\n\n try:\n result = func(*args, **kwargs)\n\n if inspect.isawaitable(result):\n result = await result\n\n return result\n except Exception:\n logger.exception(\n \"Background process '%s' threw an exception\", desc,\n )\n finally:\n _background_process_in_flight_count.labels(desc).dec()\n\n with PreserveLoggingContext():\n # Note that we return a Deferred here so that it can be used in a\n # looping_call and other places that expect a Deferred.\n return defer.ensureDeferred(run())\n\n\ndef wrap_as_background_process(desc):\n \"\"\"Decorator that wraps a function that gets called as a background\n process.\n\n Equivalent of calling the function with `run_as_background_process`\n \"\"\"\n\n def wrap_as_background_process_inner(func):\n @wraps(func)\n def wrap_as_background_process_inner_2(*args, **kwargs):\n return run_as_background_process(desc, func, *args, **kwargs)\n\n return wrap_as_background_process_inner_2\n\n return wrap_as_background_process_inner\n\n\nclass BackgroundProcessLoggingContext(LoggingContext):\n \"\"\"A logging context that tracks in flight metrics for background\n processes.\n \"\"\"\n\n __slots__ = [\"_proc\"]\n\n def __init__(self, name: str):\n super().__init__(name)\n\n self._proc = _BackgroundProcess(name, self)\n\n def start(self, rusage: \"Optional[resource._RUsage]\"):\n \"\"\"Log context has started running (again).\n \"\"\"\n\n super().start(rusage)\n\n # We've become active again so we make sure we're in the list of active\n # procs. (Note that \"start\" here means we've become active, as opposed\n # to starting for the first time.)\n with _bg_metrics_lock:\n _background_processes_active_since_last_scrape.add(self._proc)\n\n def __exit__(self, type, value, traceback) -> None:\n \"\"\"Log context has finished.\n \"\"\"\n\n super().__exit__(type, value, traceback)\n\n # The background process has finished. 
We explictly remove and manually\n # update the metrics here so that if nothing is scraping metrics the set\n # doesn't infinitely grow.\n with _bg_metrics_lock:\n _background_processes_active_since_last_scrape.discard(self._proc)\n\n self._proc.update_metrics()\n", "path": "synapse/metrics/background_process_metrics.py"}]} | 3,353 | 251 |
gh_patches_debug_23886 | rasdani/github-patches | git_diff | Kinto__kinto-1927 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Kinto not able to start - Regression due to jsonschema Draft7Validator
#1808 introduced a regression because `Draft7Validator` is not available in any released version of the jsonschema package yet (it was added on master but has not been released).
The solution is either to depend on the master branch of jsonschema (not recommended, I guess) or to revert this change until a new version is released.
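One way to sketch the revert-style workaround is an import fallback, so Kinto keeps starting against released jsonschema versions (illustration only, not necessarily the shipped fix):

```python
try:
    # Only available on jsonschema master at the moment.
    from jsonschema import Draft7Validator as DraftValidator
except ImportError:
    # Fall back to the validator shipped in current PyPI releases.
    from jsonschema import Draft4Validator as DraftValidator

# The rest of the module can then call DraftValidator.check_schema(...) as before.
```

That keeps Draft 7 behaviour for anyone already running jsonschema from master while not breaking startup elsewhere.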
</issue>
<code>
[start of kinto/schema_validation.py]
1 import colander
2 from jsonschema import Draft7Validator, ValidationError, SchemaError, RefResolutionError, validate
3 from pyramid.settings import asbool
4
5 from kinto.core import utils
6 from kinto.core.errors import raise_invalid
7 from kinto.views import object_exists_or_404
8
9
10 class JSONSchemaMapping(colander.SchemaNode):
11 def schema_type(self, **kw):
12 return colander.Mapping(unknown="preserve")
13
14 def deserialize(self, cstruct=colander.null):
15 # Start by deserializing a simple mapping.
16 validated = super().deserialize(cstruct)
17
18 # In case it is optional in parent schema.
19 if not validated or validated in (colander.null, colander.drop):
20 return validated
21 try:
22 check_schema(validated)
23 except ValidationError as e:
24 self.raise_invalid(e.message)
25 return validated
26
27
28 def check_schema(data):
29 try:
30 Draft7Validator.check_schema(data)
31 except SchemaError as e:
32 message = e.path.pop() + e.message
33 raise ValidationError(message)
34
35
36 def validate_schema(data, schema, ignore_fields=[]):
37 required_fields = [f for f in schema.get("required", []) if f not in ignore_fields]
38 # jsonschema doesn't accept 'required': [] yet.
39 # See https://github.com/Julian/jsonschema/issues/337.
40 # In the meantime, strip out 'required' if no other fields are required.
41 if required_fields:
42 schema = {**schema, "required": required_fields}
43 else:
44 schema = {f: v for f, v in schema.items() if f != "required"}
45
46 data = {f: v for f, v in data.items() if f not in ignore_fields}
47
48 try:
49 validate(data, schema)
50 except ValidationError as e:
51 if e.path:
52 field = e.path[-1]
53 elif e.validator_value:
54 field = e.validator_value[-1]
55 else:
56 field = e.schema_path[-1]
57 e.field = field
58 raise e
59 # Raise an error here if a reference in the schema doesn't resolve.
60 # jsonschema doesn't provide schema validation checking upon creation yet,
61 # it must be validated against data.
62 # See https://github.com/Julian/jsonschema/issues/399
63 # For future support https://github.com/Julian/jsonschema/issues/346.
64 except RefResolutionError as e:
65 raise e
66
67
68 def validate_from_bucket_schema_or_400(data, resource_name, request, ignore_fields=[]):
69 """Lookup in the parent objects if a schema was defined for this resource.
70
71 If the schema validation feature is enabled, if a schema is/are defined, and if the
72 data does not validate it/them, then it raises a 400 exception.
73 """
74 settings = request.registry.settings
75 schema_validation = "experimental_collection_schema_validation"
76 # If disabled from settings, do nothing.
77 if not asbool(settings.get(schema_validation)):
78 return
79
80 bucket_id = request.matchdict["bucket_id"]
81 bucket_uri = utils.instance_uri(request, "bucket", id=bucket_id)
82 buckets = request.bound_data.setdefault("buckets", {})
83 if bucket_uri not in buckets:
84 # Unknown yet, fetch from storage.
85 bucket = object_exists_or_404(
86 request, collection_id="bucket", parent_id="", object_id=bucket_id
87 )
88 buckets[bucket_uri] = bucket
89
90 # Let's see if the bucket defines a schema for this resource.
91 metadata_field = "{}:schema".format(resource_name)
92 bucket = buckets[bucket_uri]
93 if metadata_field not in bucket:
94 return
95
96 # Validate or fail with 400.
97 schema = bucket[metadata_field]
98 try:
99 validate_schema(data, schema, ignore_fields=ignore_fields)
100 except ValidationError as e:
101 raise_invalid(request, name=e.field, description=e.message)
102 except RefResolutionError as e:
103 raise_invalid(request, name="schema", description=str(e))
104
[end of kinto/schema_validation.py]
[start of setup.py]
1 import codecs
2 import os
3 from setuptools import setup, find_packages
4
5 # abspath here because setup.py may be __main__, in which case
6 # __file__ is not guaranteed to be absolute
7 here = os.path.abspath(os.path.dirname(__file__))
8
9
10 def read_file(filename):
11 """Open a related file and return its content."""
12 with codecs.open(os.path.join(here, filename), encoding="utf-8") as f:
13 content = f.read()
14 return content
15
16
17 README = read_file("README.rst")
18 CHANGELOG = read_file("CHANGELOG.rst")
19 CONTRIBUTORS = read_file("CONTRIBUTORS.rst")
20
21 REQUIREMENTS = [
22 "bcrypt",
23 "colander >= 1.4.0",
24 "cornice",
25 "cornice_swagger >= 0.5.1",
26 "dockerflow",
27 "jsonschema",
28 "jsonpatch",
29 "logging-color-formatter >= 1.0.1", # Message interpolations.
30 "python-dateutil",
31 "pyramid >= 1.9.1, < 2.0",
32 "pyramid_multiauth >= 0.8", # User on policy selected event.
33 "transaction",
34 # pyramid_tm changed the location of their tween in 2.x and one of
35 # our tests fails on 2.0.
36 "pyramid_tm >= 2.1",
37 "requests",
38 "waitress",
39 "ujson >= 1.35",
40 ]
41
42 POSTGRESQL_REQUIRES = ["SQLAlchemy", "psycopg2 > 2.5", "zope.sqlalchemy"]
43
44 REDIS_REQUIRES = ["kinto_redis"]
45
46 MEMCACHED_REQUIRES = ["python-memcached"]
47
48 SETUP_REQUIRES = ["pytest-runner"]
49
50 TEST_REQUIREMENTS = ["bravado_core", "pytest", "WebTest"]
51
52 DEPENDENCY_LINKS = []
53
54 MONITORING_REQUIRES = ["raven", "statsd", "newrelic", "werkzeug"]
55
56 ENTRY_POINTS = {
57 "paste.app_factory": ["main = kinto:main"],
58 "console_scripts": ["kinto = kinto.__main__:main"],
59 }
60
61
62 setup(
63 name="kinto",
64 version="11.2.0",
65 description="Kinto Web Service - Store, Sync, Share, and Self-Host.",
66 long_description="{}\n\n{}\n\n{}".format(README, CHANGELOG, CONTRIBUTORS),
67 license="Apache License (2.0)",
68 classifiers=[
69 "Programming Language :: Python",
70 "Programming Language :: Python :: 3",
71 "Programming Language :: Python :: 3.5",
72 "Programming Language :: Python :: 3.6",
73 "Programming Language :: Python :: Implementation :: CPython",
74 "Topic :: Internet :: WWW/HTTP",
75 "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
76 "License :: OSI Approved :: Apache Software License",
77 ],
78 keywords="web sync json storage services",
79 author="Mozilla Services",
80 author_email="[email protected]",
81 url="https://github.com/Kinto/kinto",
82 packages=find_packages(),
83 package_data={"": ["*.rst", "*.py", "*.yaml"]},
84 include_package_data=True,
85 zip_safe=False,
86 setup_requires=SETUP_REQUIRES,
87 tests_require=TEST_REQUIREMENTS,
88 install_requires=REQUIREMENTS,
89 extras_require={
90 "redis": REDIS_REQUIRES,
91 "memcached": MEMCACHED_REQUIRES,
92 "postgresql": POSTGRESQL_REQUIRES,
93 "monitoring": MONITORING_REQUIRES,
94 },
95 test_suite="tests",
96 dependency_links=DEPENDENCY_LINKS,
97 entry_points=ENTRY_POINTS,
98 )
99
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kinto/schema_validation.py b/kinto/schema_validation.py
--- a/kinto/schema_validation.py
+++ b/kinto/schema_validation.py
@@ -1,5 +1,11 @@
import colander
-from jsonschema import Draft7Validator, ValidationError, SchemaError, RefResolutionError, validate
+from jsonschema import ValidationError, SchemaError, RefResolutionError, validate
+
+try: # pragma: no cover
+ from jsonschema import Draft7Validator as DraftValidator
+except ImportError:
+ from jsonschema import Draft4Validator as DraftValidator
+
from pyramid.settings import asbool
from kinto.core import utils
@@ -27,7 +33,7 @@
def check_schema(data):
try:
- Draft7Validator.check_schema(data)
+ DraftValidator.check_schema(data)
except SchemaError as e:
message = e.path.pop() + e.message
raise ValidationError(message)
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -61,7 +61,7 @@
setup(
name="kinto",
- version="11.2.0",
+ version="11.2.1",
description="Kinto Web Service - Store, Sync, Share, and Self-Host.",
long_description="{}\n\n{}\n\n{}".format(README, CHANGELOG, CONTRIBUTORS),
license="Apache License (2.0)",
| {"golden_diff": "diff --git a/kinto/schema_validation.py b/kinto/schema_validation.py\n--- a/kinto/schema_validation.py\n+++ b/kinto/schema_validation.py\n@@ -1,5 +1,11 @@\n import colander\n-from jsonschema import Draft7Validator, ValidationError, SchemaError, RefResolutionError, validate\n+from jsonschema import ValidationError, SchemaError, RefResolutionError, validate\n+\n+try: # pragma: no cover\n+ from jsonschema import Draft7Validator as DraftValidator\n+except ImportError:\n+ from jsonschema import Draft4Validator as DraftValidator\n+\n from pyramid.settings import asbool\n \n from kinto.core import utils\n@@ -27,7 +33,7 @@\n \n def check_schema(data):\n try:\n- Draft7Validator.check_schema(data)\n+ DraftValidator.check_schema(data)\n except SchemaError as e:\n message = e.path.pop() + e.message\n raise ValidationError(message)\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -61,7 +61,7 @@\n \n setup(\n name=\"kinto\",\n- version=\"11.2.0\",\n+ version=\"11.2.1\",\n description=\"Kinto Web Service - Store, Sync, Share, and Self-Host.\",\n long_description=\"{}\\n\\n{}\\n\\n{}\".format(README, CHANGELOG, CONTRIBUTORS),\n license=\"Apache License (2.0)\",\n", "issue": "Kinto not able to start - Regression due to jsonschema Draft7Validator\n#1808 introduced a regression, because the Draft7Validator is not currently released by the jsonschema package (they introduced it in master but it's not released yet)\r\n\r\nSolution is either to depend on the master branch of jsonschema (not recommended I guess) or revert this change back until a new version is issued.\n", "before_files": [{"content": "import colander\nfrom jsonschema import Draft7Validator, ValidationError, SchemaError, RefResolutionError, validate\nfrom pyramid.settings import asbool\n\nfrom kinto.core import utils\nfrom kinto.core.errors import raise_invalid\nfrom kinto.views import object_exists_or_404\n\n\nclass JSONSchemaMapping(colander.SchemaNode):\n def schema_type(self, **kw):\n return colander.Mapping(unknown=\"preserve\")\n\n def deserialize(self, cstruct=colander.null):\n # Start by deserializing a simple mapping.\n validated = super().deserialize(cstruct)\n\n # In case it is optional in parent schema.\n if not validated or validated in (colander.null, colander.drop):\n return validated\n try:\n check_schema(validated)\n except ValidationError as e:\n self.raise_invalid(e.message)\n return validated\n\n\ndef check_schema(data):\n try:\n Draft7Validator.check_schema(data)\n except SchemaError as e:\n message = e.path.pop() + e.message\n raise ValidationError(message)\n\n\ndef validate_schema(data, schema, ignore_fields=[]):\n required_fields = [f for f in schema.get(\"required\", []) if f not in ignore_fields]\n # jsonschema doesn't accept 'required': [] yet.\n # See https://github.com/Julian/jsonschema/issues/337.\n # In the meantime, strip out 'required' if no other fields are required.\n if required_fields:\n schema = {**schema, \"required\": required_fields}\n else:\n schema = {f: v for f, v in schema.items() if f != \"required\"}\n\n data = {f: v for f, v in data.items() if f not in ignore_fields}\n\n try:\n validate(data, schema)\n except ValidationError as e:\n if e.path:\n field = e.path[-1]\n elif e.validator_value:\n field = e.validator_value[-1]\n else:\n field = e.schema_path[-1]\n e.field = field\n raise e\n # Raise an error here if a reference in the schema doesn't resolve.\n # jsonschema doesn't provide schema validation checking upon creation yet,\n # it must be validated 
against data.\n # See https://github.com/Julian/jsonschema/issues/399\n # For future support https://github.com/Julian/jsonschema/issues/346.\n except RefResolutionError as e:\n raise e\n\n\ndef validate_from_bucket_schema_or_400(data, resource_name, request, ignore_fields=[]):\n \"\"\"Lookup in the parent objects if a schema was defined for this resource.\n\n If the schema validation feature is enabled, if a schema is/are defined, and if the\n data does not validate it/them, then it raises a 400 exception.\n \"\"\"\n settings = request.registry.settings\n schema_validation = \"experimental_collection_schema_validation\"\n # If disabled from settings, do nothing.\n if not asbool(settings.get(schema_validation)):\n return\n\n bucket_id = request.matchdict[\"bucket_id\"]\n bucket_uri = utils.instance_uri(request, \"bucket\", id=bucket_id)\n buckets = request.bound_data.setdefault(\"buckets\", {})\n if bucket_uri not in buckets:\n # Unknown yet, fetch from storage.\n bucket = object_exists_or_404(\n request, collection_id=\"bucket\", parent_id=\"\", object_id=bucket_id\n )\n buckets[bucket_uri] = bucket\n\n # Let's see if the bucket defines a schema for this resource.\n metadata_field = \"{}:schema\".format(resource_name)\n bucket = buckets[bucket_uri]\n if metadata_field not in bucket:\n return\n\n # Validate or fail with 400.\n schema = bucket[metadata_field]\n try:\n validate_schema(data, schema, ignore_fields=ignore_fields)\n except ValidationError as e:\n raise_invalid(request, name=e.field, description=e.message)\n except RefResolutionError as e:\n raise_invalid(request, name=\"schema\", description=str(e))\n", "path": "kinto/schema_validation.py"}, {"content": "import codecs\nimport os\nfrom setuptools import setup, find_packages\n\n# abspath here because setup.py may be __main__, in which case\n# __file__ is not guaranteed to be absolute\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read_file(filename):\n \"\"\"Open a related file and return its content.\"\"\"\n with codecs.open(os.path.join(here, filename), encoding=\"utf-8\") as f:\n content = f.read()\n return content\n\n\nREADME = read_file(\"README.rst\")\nCHANGELOG = read_file(\"CHANGELOG.rst\")\nCONTRIBUTORS = read_file(\"CONTRIBUTORS.rst\")\n\nREQUIREMENTS = [\n \"bcrypt\",\n \"colander >= 1.4.0\",\n \"cornice\",\n \"cornice_swagger >= 0.5.1\",\n \"dockerflow\",\n \"jsonschema\",\n \"jsonpatch\",\n \"logging-color-formatter >= 1.0.1\", # Message interpolations.\n \"python-dateutil\",\n \"pyramid >= 1.9.1, < 2.0\",\n \"pyramid_multiauth >= 0.8\", # User on policy selected event.\n \"transaction\",\n # pyramid_tm changed the location of their tween in 2.x and one of\n # our tests fails on 2.0.\n \"pyramid_tm >= 2.1\",\n \"requests\",\n \"waitress\",\n \"ujson >= 1.35\",\n]\n\nPOSTGRESQL_REQUIRES = [\"SQLAlchemy\", \"psycopg2 > 2.5\", \"zope.sqlalchemy\"]\n\nREDIS_REQUIRES = [\"kinto_redis\"]\n\nMEMCACHED_REQUIRES = [\"python-memcached\"]\n\nSETUP_REQUIRES = [\"pytest-runner\"]\n\nTEST_REQUIREMENTS = [\"bravado_core\", \"pytest\", \"WebTest\"]\n\nDEPENDENCY_LINKS = []\n\nMONITORING_REQUIRES = [\"raven\", \"statsd\", \"newrelic\", \"werkzeug\"]\n\nENTRY_POINTS = {\n \"paste.app_factory\": [\"main = kinto:main\"],\n \"console_scripts\": [\"kinto = kinto.__main__:main\"],\n}\n\n\nsetup(\n name=\"kinto\",\n version=\"11.2.0\",\n description=\"Kinto Web Service - Store, Sync, Share, and Self-Host.\",\n long_description=\"{}\\n\\n{}\\n\\n{}\".format(README, CHANGELOG, CONTRIBUTORS),\n license=\"Apache License (2.0)\",\n 
classifiers=[\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI :: Application\",\n \"License :: OSI Approved :: Apache Software License\",\n ],\n keywords=\"web sync json storage services\",\n author=\"Mozilla Services\",\n author_email=\"[email protected]\",\n url=\"https://github.com/Kinto/kinto\",\n packages=find_packages(),\n package_data={\"\": [\"*.rst\", \"*.py\", \"*.yaml\"]},\n include_package_data=True,\n zip_safe=False,\n setup_requires=SETUP_REQUIRES,\n tests_require=TEST_REQUIREMENTS,\n install_requires=REQUIREMENTS,\n extras_require={\n \"redis\": REDIS_REQUIRES,\n \"memcached\": MEMCACHED_REQUIRES,\n \"postgresql\": POSTGRESQL_REQUIRES,\n \"monitoring\": MONITORING_REQUIRES,\n },\n test_suite=\"tests\",\n dependency_links=DEPENDENCY_LINKS,\n entry_points=ENTRY_POINTS,\n)\n", "path": "setup.py"}]} | 2,684 | 311 |
gh_patches_debug_50213 | rasdani/github-patches | git_diff | pex-tool__pex-1590 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.64
On the docket:
+ [x] Pex does not support mac universal2 wheels #1587
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.63"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.63"
+__version__ = "2.1.64"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.63\"\n+__version__ = \"2.1.64\"\n", "issue": "Release 2.1.64\nOn the docket:\r\n+ [x] Pex does not support mac universal2 wheels #1587 \r\n\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.63\"\n", "path": "pex/version.py"}]} | 617 | 96 |
gh_patches_debug_19859 | rasdani/github-patches | git_diff | sublimelsp__LSP-1265 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support document symbol sorting
Currently, when we call `LSP: Document Symbols`, the document symbols are displayed in the reverse of their order in the actual code. Like this:
*(screenshot: the document symbol list displayed in reverse order)*
This is because the LSP server returns document symbols in reverse order (in this example, the server is `rust-analyzer`).
The Language Server Protocol doesn't specify the order of document symbols returned from the server, so the display order should probably be decided and handled on the client side.
In VSCode, users have options to sort symbols in several ways (by position, by name, by category):
*(screenshot: VSCode's symbol sorting options)*
It would be best to provide sort options like VSCode does, but for now, sorting by something (I think by position is good) should be implemented before providing sort options.
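A minimal sketch of what sorting by position on the client side could look like (the helper name is made up here, and `symbols` stands for the raw list returned by the server, either `DocumentSymbol` or `SymbolInformation` dicts):

```python
def sort_symbols_by_position(symbols):
    # DocumentSymbol carries "range" directly; SymbolInformation nests it
    # under "location".
    def start_position(symbol):
        symbol_range = symbol.get("range") or symbol["location"]["range"]
        return (symbol_range["start"]["line"], symbol_range["start"]["character"])

    return sorted(symbols, key=start_position)
```

Sorting like this would at least make the quick panel follow file order; name- and category-based sorting could then be added later as options.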
</issue>
<code>
[start of plugin/symbols.py]
1 from .core.protocol import Request, Range
2 from .core.registry import LspTextCommand
3 from .core.typing import Any, List, Optional, Tuple, Dict, Generator
4 from .core.views import location_to_encoded_filename
5 from .core.views import range_to_region
6 from .core.views import SYMBOL_KINDS
7 from .core.views import text_document_identifier
8 from contextlib import contextmanager
9 import os
10 import sublime
11 import sublime_plugin
12
13
14 def unpack_lsp_kind(kind: int) -> Tuple[int, str, str, str]:
15 if 1 <= kind <= len(SYMBOL_KINDS):
16 return SYMBOL_KINDS[kind - 1]
17 return sublime.KIND_ID_AMBIGUOUS, "?", "???", "comment"
18
19
20 def format_symbol_kind(kind: int) -> str:
21 if 1 <= kind <= len(SYMBOL_KINDS):
22 return SYMBOL_KINDS[kind - 1][2]
23 return str(kind)
24
25
26 def get_symbol_scope_from_lsp_kind(kind: int) -> str:
27 if 1 <= kind <= len(SYMBOL_KINDS):
28 return SYMBOL_KINDS[kind - 1][3]
29 return 'comment'
30
31
32 @contextmanager
33 def _additional_name(names: List[str], name: str) -> Generator[None, None, None]:
34 names.append(name)
35 yield
36 names.pop(-1)
37
38
39 class LspSelectionClearCommand(sublime_plugin.TextCommand):
40 """
41 Selections may not be modified outside the run method of a text command. Thus, to allow modification in an async
42 context we need to have dedicated commands for this.
43
44 https://github.com/sublimehq/sublime_text/issues/485#issuecomment-337480388
45 """
46
47 def run(self, _: sublime.Edit) -> None:
48 self.view.sel().clear()
49
50
51 class LspSelectionAddCommand(sublime_plugin.TextCommand):
52
53 def run(self, _: sublime.Edit, regions: List[Tuple[int, int]]) -> None:
54 for region in regions:
55 self.view.sel().add(sublime.Region(*region))
56
57
58 class LspSelectionSetCommand(sublime_plugin.TextCommand):
59
60 def run(self, _: sublime.Edit, regions: List[Tuple[int, int]]) -> None:
61 self.view.sel().clear()
62 for region in regions:
63 self.view.sel().add(sublime.Region(*region))
64
65
66 class LspDocumentSymbolsCommand(LspTextCommand):
67
68 capability = 'documentSymbolProvider'
69 REGIONS_KEY = 'lsp_document_symbols'
70
71 def __init__(self, view: sublime.View) -> None:
72 super().__init__(view)
73 self.old_regions = [] # type: List[sublime.Region]
74 self.regions = [] # type: List[Tuple[sublime.Region, Optional[sublime.Region], str]]
75 self.is_first_selection = False
76
77 def run(self, edit: sublime.Edit) -> None:
78 session = self.best_session(self.capability)
79 if session:
80 session.send_request(
81 Request.documentSymbols({"textDocument": text_document_identifier(self.view)}), self.handle_response)
82
83 def handle_response(self, response: Any) -> None:
84 window = self.view.window()
85 if window and isinstance(response, list) and len(response) > 0:
86 self.old_regions = [sublime.Region(r.a, r.b) for r in self.view.sel()]
87 self.is_first_selection = True
88 window.show_quick_panel(
89 self.process_symbols(response),
90 self.on_symbol_selected,
91 sublime.KEEP_OPEN_ON_FOCUS_LOST,
92 0,
93 self.on_highlighted)
94 self.view.run_command("lsp_selection_clear")
95
96 def region(self, index: int) -> sublime.Region:
97 return self.regions[index][0]
98
99 def selection_region(self, index: int) -> Optional[sublime.Region]:
100 return self.regions[index][1]
101
102 def scope(self, index: int) -> str:
103 return self.regions[index][2]
104
105 def on_symbol_selected(self, index: int) -> None:
106 if index == -1:
107 if len(self.old_regions) > 0:
108 self.view.run_command("lsp_selection_add", {"regions": [(r.a, r.b) for r in self.old_regions]})
109 self.view.show_at_center(self.old_regions[0].begin())
110 else:
111 region = self.selection_region(index) or self.region(index)
112 self.view.run_command("lsp_selection_add", {"regions": [(region.a, region.a)]})
113 self.view.show_at_center(region.a)
114 self.view.erase_regions(self.REGIONS_KEY)
115 self.old_regions.clear()
116 self.regions.clear()
117
118 def on_highlighted(self, index: int) -> None:
119 if self.is_first_selection:
120 self.is_first_selection = False
121 return
122 region = self.region(index)
123 self.view.show_at_center(region.a)
124 self.view.add_regions(self.REGIONS_KEY, [region], self.scope(index), '', sublime.DRAW_NO_FILL)
125
126 def process_symbols(self, items: List[Dict[str, Any]]) -> List[sublime.QuickPanelItem]:
127 self.regions.clear()
128 if 'selectionRange' in items[0]:
129 return self.process_document_symbols(items)
130 else:
131 return self.process_symbol_informations(items)
132
133 def process_document_symbols(self, items: List[Dict[str, Any]]) -> List[sublime.QuickPanelItem]:
134 quick_panel_items = [] # type: List[sublime.QuickPanelItem]
135 names = [] # type: List[str]
136 for item in items:
137 self.process_document_symbol_recursive(quick_panel_items, item, names)
138 return quick_panel_items
139
140 def process_document_symbol_recursive(self, quick_panel_items: List[sublime.QuickPanelItem], item: Dict[str, Any],
141 names: List[str]) -> None:
142 lsp_kind = item["kind"]
143 self.regions.append((range_to_region(Range.from_lsp(item['range']), self.view),
144 range_to_region(Range.from_lsp(item['selectionRange']), self.view),
145 get_symbol_scope_from_lsp_kind(lsp_kind)))
146 name = item['name']
147 with _additional_name(names, name):
148 st_kind, st_icon, st_display_type, _ = unpack_lsp_kind(lsp_kind)
149 formatted_names = " > ".join(names)
150 st_details = item.get("detail") or ""
151 if st_details:
152 st_details = "{} | {}".format(st_details, formatted_names)
153 else:
154 st_details = formatted_names
155 quick_panel_items.append(
156 sublime.QuickPanelItem(
157 trigger=name,
158 details=st_details,
159 annotation=st_display_type,
160 kind=(st_kind, st_icon, st_display_type)))
161 children = item.get('children') or []
162 for child in children:
163 self.process_document_symbol_recursive(quick_panel_items, child, names)
164
165 def process_symbol_informations(self, items: List[Dict[str, Any]]) -> List[sublime.QuickPanelItem]:
166 quick_panel_items = [] # type: List[sublime.QuickPanelItem]
167 for item in items:
168 lsp_kind = item['kind']
169 self.regions.append((range_to_region(Range.from_lsp(item['location']['range']), self.view),
170 None, get_symbol_scope_from_lsp_kind(lsp_kind)))
171 container = item.get("containerName")
172 st_kind, st_icon, st_display_type, _ = unpack_lsp_kind(lsp_kind)
173 quick_panel_items.append(
174 sublime.QuickPanelItem(
175 trigger=item["name"],
176 details=container or "",
177 annotation=st_display_type,
178 kind=(st_kind, st_icon, st_display_type)))
179 return quick_panel_items
180
181
182 class SymbolQueryInput(sublime_plugin.TextInputHandler):
183
184 def validate(self, txt: str) -> bool:
185 return txt != ""
186
187 def placeholder(self) -> str:
188 return "Symbol"
189
190
191 class LspWorkspaceSymbolsCommand(LspTextCommand):
192
193 capability = 'workspaceSymbolProvider'
194
195 def input(self, _args: Any) -> sublime_plugin.TextInputHandler:
196 return SymbolQueryInput()
197
198 def run(self, edit: sublime.Edit, symbol_query_input: str = "") -> None:
199 if symbol_query_input:
200 session = self.best_session(self.capability)
201 if session:
202 self.view.set_status("lsp_workspace_symbols", "Searching for '{}'...".format(symbol_query_input))
203 request = Request.workspaceSymbol({"query": symbol_query_input})
204 session.send_request(request, lambda r: self._handle_response(
205 symbol_query_input, r), self._handle_error)
206
207 def _format(self, s: Dict[str, Any]) -> str:
208 file_name = os.path.basename(s['location']['uri'])
209 symbol_kind = format_symbol_kind(s["kind"])
210 name = "{} ({}) - {} -- {}".format(s['name'], symbol_kind, s.get('containerName', ""), file_name)
211 return name
212
213 def _open_file(self, symbols: List[Dict[str, Any]], index: int) -> None:
214 if index != -1:
215 symbol = symbols[index]
216 window = self.view.window()
217 if window:
218 window.open_file(location_to_encoded_filename(symbol['location']), sublime.ENCODED_POSITION)
219
220 def _handle_response(self, query: str, response: Optional[List[Dict[str, Any]]]) -> None:
221 self.view.erase_status("lsp_workspace_symbols")
222 if response:
223 matches = response
224 window = self.view.window()
225 if window:
226 window.show_quick_panel(list(map(self._format, matches)), lambda i: self._open_file(matches, i))
227 else:
228 sublime.message_dialog("No matches found for query string: '{}'".format(query))
229
230 def _handle_error(self, error: Dict[str, Any]) -> None:
231 self.view.erase_status("lsp_workspace_symbols")
232 reason = error.get("message", "none provided by server :(")
233 msg = "command 'workspace/symbol' failed. Reason: {}".format(reason)
234 sublime.error_message(msg)
235
[end of plugin/symbols.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/plugin/symbols.py b/plugin/symbols.py
--- a/plugin/symbols.py
+++ b/plugin/symbols.py
@@ -125,10 +125,16 @@
def process_symbols(self, items: List[Dict[str, Any]]) -> List[sublime.QuickPanelItem]:
self.regions.clear()
+ panel_items = []
if 'selectionRange' in items[0]:
- return self.process_document_symbols(items)
+ panel_items = self.process_document_symbols(items)
else:
- return self.process_symbol_informations(items)
+ panel_items = self.process_symbol_informations(items)
+ # Sort both lists in sync according to the range's begin point.
+ sorted_results = zip(*sorted(zip(self.regions, panel_items), key=lambda item: item[0][0].begin()))
+ sorted_regions, sorted_panel_items = sorted_results
+ self.regions = list(sorted_regions)
+ return list(sorted_panel_items)
def process_document_symbols(self, items: List[Dict[str, Any]]) -> List[sublime.QuickPanelItem]:
quick_panel_items = [] # type: List[sublime.QuickPanelItem]
| {"golden_diff": "diff --git a/plugin/symbols.py b/plugin/symbols.py\n--- a/plugin/symbols.py\n+++ b/plugin/symbols.py\n@@ -125,10 +125,16 @@\n \n def process_symbols(self, items: List[Dict[str, Any]]) -> List[sublime.QuickPanelItem]:\n self.regions.clear()\n+ panel_items = []\n if 'selectionRange' in items[0]:\n- return self.process_document_symbols(items)\n+ panel_items = self.process_document_symbols(items)\n else:\n- return self.process_symbol_informations(items)\n+ panel_items = self.process_symbol_informations(items)\n+ # Sort both lists in sync according to the range's begin point.\n+ sorted_results = zip(*sorted(zip(self.regions, panel_items), key=lambda item: item[0][0].begin()))\n+ sorted_regions, sorted_panel_items = sorted_results\n+ self.regions = list(sorted_regions)\n+ return list(sorted_panel_items)\n \n def process_document_symbols(self, items: List[Dict[str, Any]]) -> List[sublime.QuickPanelItem]:\n quick_panel_items = [] # type: List[sublime.QuickPanelItem]\n", "issue": "Support document symbol sorting\nCurrently, when we call `LSP: Document Symbols`, we get document symbols displayed in reverse order from the actual code. Like this:\r\n\r\n\r\n\r\nThis is because the LSP server returns document symbols in reverse order (in this example, the server is `rust-analyzer`).\r\nLanguage Server Protocol doesn't specify the order of document symbols returned from the server, so how symbols are displayed should probably be decided and handled on the client-side.\r\n\r\nLooking at VSCode, users have options to sort symbols in several ways (by position, by name, by category).\r\n\r\n\r\n\r\nIt would be best to provide sort options like VSCode, but for now, to sort them by something (I think by position is good) should be implemented before providing sort options.\n", "before_files": [{"content": "from .core.protocol import Request, Range\nfrom .core.registry import LspTextCommand\nfrom .core.typing import Any, List, Optional, Tuple, Dict, Generator\nfrom .core.views import location_to_encoded_filename\nfrom .core.views import range_to_region\nfrom .core.views import SYMBOL_KINDS\nfrom .core.views import text_document_identifier\nfrom contextlib import contextmanager\nimport os\nimport sublime\nimport sublime_plugin\n\n\ndef unpack_lsp_kind(kind: int) -> Tuple[int, str, str, str]:\n if 1 <= kind <= len(SYMBOL_KINDS):\n return SYMBOL_KINDS[kind - 1]\n return sublime.KIND_ID_AMBIGUOUS, \"?\", \"???\", \"comment\"\n\n\ndef format_symbol_kind(kind: int) -> str:\n if 1 <= kind <= len(SYMBOL_KINDS):\n return SYMBOL_KINDS[kind - 1][2]\n return str(kind)\n\n\ndef get_symbol_scope_from_lsp_kind(kind: int) -> str:\n if 1 <= kind <= len(SYMBOL_KINDS):\n return SYMBOL_KINDS[kind - 1][3]\n return 'comment'\n\n\n@contextmanager\ndef _additional_name(names: List[str], name: str) -> Generator[None, None, None]:\n names.append(name)\n yield\n names.pop(-1)\n\n\nclass LspSelectionClearCommand(sublime_plugin.TextCommand):\n \"\"\"\n Selections may not be modified outside the run method of a text command. 
Thus, to allow modification in an async\n context we need to have dedicated commands for this.\n\n https://github.com/sublimehq/sublime_text/issues/485#issuecomment-337480388\n \"\"\"\n\n def run(self, _: sublime.Edit) -> None:\n self.view.sel().clear()\n\n\nclass LspSelectionAddCommand(sublime_plugin.TextCommand):\n\n def run(self, _: sublime.Edit, regions: List[Tuple[int, int]]) -> None:\n for region in regions:\n self.view.sel().add(sublime.Region(*region))\n\n\nclass LspSelectionSetCommand(sublime_plugin.TextCommand):\n\n def run(self, _: sublime.Edit, regions: List[Tuple[int, int]]) -> None:\n self.view.sel().clear()\n for region in regions:\n self.view.sel().add(sublime.Region(*region))\n\n\nclass LspDocumentSymbolsCommand(LspTextCommand):\n\n capability = 'documentSymbolProvider'\n REGIONS_KEY = 'lsp_document_symbols'\n\n def __init__(self, view: sublime.View) -> None:\n super().__init__(view)\n self.old_regions = [] # type: List[sublime.Region]\n self.regions = [] # type: List[Tuple[sublime.Region, Optional[sublime.Region], str]]\n self.is_first_selection = False\n\n def run(self, edit: sublime.Edit) -> None:\n session = self.best_session(self.capability)\n if session:\n session.send_request(\n Request.documentSymbols({\"textDocument\": text_document_identifier(self.view)}), self.handle_response)\n\n def handle_response(self, response: Any) -> None:\n window = self.view.window()\n if window and isinstance(response, list) and len(response) > 0:\n self.old_regions = [sublime.Region(r.a, r.b) for r in self.view.sel()]\n self.is_first_selection = True\n window.show_quick_panel(\n self.process_symbols(response),\n self.on_symbol_selected,\n sublime.KEEP_OPEN_ON_FOCUS_LOST,\n 0,\n self.on_highlighted)\n self.view.run_command(\"lsp_selection_clear\")\n\n def region(self, index: int) -> sublime.Region:\n return self.regions[index][0]\n\n def selection_region(self, index: int) -> Optional[sublime.Region]:\n return self.regions[index][1]\n\n def scope(self, index: int) -> str:\n return self.regions[index][2]\n\n def on_symbol_selected(self, index: int) -> None:\n if index == -1:\n if len(self.old_regions) > 0:\n self.view.run_command(\"lsp_selection_add\", {\"regions\": [(r.a, r.b) for r in self.old_regions]})\n self.view.show_at_center(self.old_regions[0].begin())\n else:\n region = self.selection_region(index) or self.region(index)\n self.view.run_command(\"lsp_selection_add\", {\"regions\": [(region.a, region.a)]})\n self.view.show_at_center(region.a)\n self.view.erase_regions(self.REGIONS_KEY)\n self.old_regions.clear()\n self.regions.clear()\n\n def on_highlighted(self, index: int) -> None:\n if self.is_first_selection:\n self.is_first_selection = False\n return\n region = self.region(index)\n self.view.show_at_center(region.a)\n self.view.add_regions(self.REGIONS_KEY, [region], self.scope(index), '', sublime.DRAW_NO_FILL)\n\n def process_symbols(self, items: List[Dict[str, Any]]) -> List[sublime.QuickPanelItem]:\n self.regions.clear()\n if 'selectionRange' in items[0]:\n return self.process_document_symbols(items)\n else:\n return self.process_symbol_informations(items)\n\n def process_document_symbols(self, items: List[Dict[str, Any]]) -> List[sublime.QuickPanelItem]:\n quick_panel_items = [] # type: List[sublime.QuickPanelItem]\n names = [] # type: List[str]\n for item in items:\n self.process_document_symbol_recursive(quick_panel_items, item, names)\n return quick_panel_items\n\n def process_document_symbol_recursive(self, quick_panel_items: List[sublime.QuickPanelItem], item: 
Dict[str, Any],\n names: List[str]) -> None:\n lsp_kind = item[\"kind\"]\n self.regions.append((range_to_region(Range.from_lsp(item['range']), self.view),\n range_to_region(Range.from_lsp(item['selectionRange']), self.view),\n get_symbol_scope_from_lsp_kind(lsp_kind)))\n name = item['name']\n with _additional_name(names, name):\n st_kind, st_icon, st_display_type, _ = unpack_lsp_kind(lsp_kind)\n formatted_names = \" > \".join(names)\n st_details = item.get(\"detail\") or \"\"\n if st_details:\n st_details = \"{} | {}\".format(st_details, formatted_names)\n else:\n st_details = formatted_names\n quick_panel_items.append(\n sublime.QuickPanelItem(\n trigger=name,\n details=st_details,\n annotation=st_display_type,\n kind=(st_kind, st_icon, st_display_type)))\n children = item.get('children') or []\n for child in children:\n self.process_document_symbol_recursive(quick_panel_items, child, names)\n\n def process_symbol_informations(self, items: List[Dict[str, Any]]) -> List[sublime.QuickPanelItem]:\n quick_panel_items = [] # type: List[sublime.QuickPanelItem]\n for item in items:\n lsp_kind = item['kind']\n self.regions.append((range_to_region(Range.from_lsp(item['location']['range']), self.view),\n None, get_symbol_scope_from_lsp_kind(lsp_kind)))\n container = item.get(\"containerName\")\n st_kind, st_icon, st_display_type, _ = unpack_lsp_kind(lsp_kind)\n quick_panel_items.append(\n sublime.QuickPanelItem(\n trigger=item[\"name\"],\n details=container or \"\",\n annotation=st_display_type,\n kind=(st_kind, st_icon, st_display_type)))\n return quick_panel_items\n\n\nclass SymbolQueryInput(sublime_plugin.TextInputHandler):\n\n def validate(self, txt: str) -> bool:\n return txt != \"\"\n\n def placeholder(self) -> str:\n return \"Symbol\"\n\n\nclass LspWorkspaceSymbolsCommand(LspTextCommand):\n\n capability = 'workspaceSymbolProvider'\n\n def input(self, _args: Any) -> sublime_plugin.TextInputHandler:\n return SymbolQueryInput()\n\n def run(self, edit: sublime.Edit, symbol_query_input: str = \"\") -> None:\n if symbol_query_input:\n session = self.best_session(self.capability)\n if session:\n self.view.set_status(\"lsp_workspace_symbols\", \"Searching for '{}'...\".format(symbol_query_input))\n request = Request.workspaceSymbol({\"query\": symbol_query_input})\n session.send_request(request, lambda r: self._handle_response(\n symbol_query_input, r), self._handle_error)\n\n def _format(self, s: Dict[str, Any]) -> str:\n file_name = os.path.basename(s['location']['uri'])\n symbol_kind = format_symbol_kind(s[\"kind\"])\n name = \"{} ({}) - {} -- {}\".format(s['name'], symbol_kind, s.get('containerName', \"\"), file_name)\n return name\n\n def _open_file(self, symbols: List[Dict[str, Any]], index: int) -> None:\n if index != -1:\n symbol = symbols[index]\n window = self.view.window()\n if window:\n window.open_file(location_to_encoded_filename(symbol['location']), sublime.ENCODED_POSITION)\n\n def _handle_response(self, query: str, response: Optional[List[Dict[str, Any]]]) -> None:\n self.view.erase_status(\"lsp_workspace_symbols\")\n if response:\n matches = response\n window = self.view.window()\n if window:\n window.show_quick_panel(list(map(self._format, matches)), lambda i: self._open_file(matches, i))\n else:\n sublime.message_dialog(\"No matches found for query string: '{}'\".format(query))\n\n def _handle_error(self, error: Dict[str, Any]) -> None:\n self.view.erase_status(\"lsp_workspace_symbols\")\n reason = error.get(\"message\", \"none provided by server :(\")\n msg = \"command 
'workspace/symbol' failed. Reason: {}\".format(reason)\n sublime.error_message(msg)\n", "path": "plugin/symbols.py"}]} | 3,561 | 257 |
gh_patches_debug_8532 | rasdani/github-patches | git_diff | akvo__akvo-rsr-2994 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Custom reports removed from 'Reports' general view
Remove custom reports from the 'Reports' general view, i.e. partner-specific reports such as the Plan Finland or EUTF reports should not be visible to other partners.

</issue>
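For context on the intended behaviour, the sketch below restates the visibility rule implemented by the view code that follows: reports with no organisations attached are public, while organisation-restricted reports should only be listed for members of those organisations. It is a minimal sketch that reuses only names appearing in `akvo/rest/views/report.py` below; it is not an exact excerpt of the project.

```python
from django.db.models import Q

def visible_reports(user, reports):
    """Minimal sketch of the visibility rule used by the view below."""
    # Admins and superusers see everything.
    if user.is_active and (user.is_superuser or user.is_admin):
        return reports
    # Everyone else: unrestricted reports plus reports of their own organisations.
    return reports.filter(
        Q(organisations=None) | Q(organisations__in=user.approved_organisations())
    ).distinct()
```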
<code>
[start of akvo/rest/views/report.py]
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7 from django.db.models import Q
8 from rest_framework.decorators import api_view
9 from rest_framework.response import Response
10
11 from akvo.rsr.models import Report, ReportFormat
12 from ..serializers import ReportSerializer, ReportFormatSerializer
13
14
15 @api_view(['GET'])
16 def reports(request):
17 """
18 A view for displaying all report information, sorted by title.
19 """
20
21 user = request.user
22 is_admin = user.is_active and (user.is_superuser or user.is_admin)
23 reports = Report.objects.all()
24 if not is_admin:
25 # Show only those reports that the user is allowed to see
26 reports = reports.filter(
27 Q(organisations=None) | Q(organisations__in=user.approved_organisations())
28 ).distinct()
29
30 # FIXME: Use a viewset instead?
31 return Response({
32 'count': reports.count(),
33 'results': [ReportSerializer(r).data for r in reports.order_by('title')],
34 })
35
36
37 @api_view(['GET'])
38 def report_formats(request):
39 """
40 A view for displaying all report format information.
41 """
42 return Response({
43 'count': ReportFormat.objects.all().count(),
44 'results': [ReportFormatSerializer(f).data for f in ReportFormat.objects.all()],
45 })
46
[end of akvo/rest/views/report.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/akvo/rest/views/report.py b/akvo/rest/views/report.py
--- a/akvo/rest/views/report.py
+++ b/akvo/rest/views/report.py
@@ -23,8 +23,9 @@
reports = Report.objects.all()
if not is_admin:
# Show only those reports that the user is allowed to see
+ approved_orgs = user.approved_organisations() if not user.is_anonymous() else []
reports = reports.filter(
- Q(organisations=None) | Q(organisations__in=user.approved_organisations())
+ Q(organisations=None) | Q(organisations__in=approved_orgs)
).distinct()
# FIXME: Use a viewset instead?
| {"golden_diff": "diff --git a/akvo/rest/views/report.py b/akvo/rest/views/report.py\n--- a/akvo/rest/views/report.py\n+++ b/akvo/rest/views/report.py\n@@ -23,8 +23,9 @@\n reports = Report.objects.all()\n if not is_admin:\n # Show only those reports that the user is allowed to see\n+ approved_orgs = user.approved_organisations() if not user.is_anonymous() else []\n reports = reports.filter(\n- Q(organisations=None) | Q(organisations__in=user.approved_organisations())\n+ Q(organisations=None) | Q(organisations__in=approved_orgs)\n ).distinct()\n \n # FIXME: Use a viewset instead?\n", "issue": "Custom reports removed from 'Reports' general view\nRemove custom reports from 'Reports' general view, i.e. Plan Finland or EUTF reports are not visible for other partners. \r\n\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom django.db.models import Q\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\nfrom akvo.rsr.models import Report, ReportFormat\nfrom ..serializers import ReportSerializer, ReportFormatSerializer\n\n\n@api_view(['GET'])\ndef reports(request):\n \"\"\"\n A view for displaying all report information, sorted by title.\n \"\"\"\n\n user = request.user\n is_admin = user.is_active and (user.is_superuser or user.is_admin)\n reports = Report.objects.all()\n if not is_admin:\n # Show only those reports that the user is allowed to see\n reports = reports.filter(\n Q(organisations=None) | Q(organisations__in=user.approved_organisations())\n ).distinct()\n\n # FIXME: Use a viewset instead?\n return Response({\n 'count': reports.count(),\n 'results': [ReportSerializer(r).data for r in reports.order_by('title')],\n })\n\n\n@api_view(['GET'])\ndef report_formats(request):\n \"\"\"\n A view for displaying all report format information.\n \"\"\"\n return Response({\n 'count': ReportFormat.objects.all().count(),\n 'results': [ReportFormatSerializer(f).data for f in ReportFormat.objects.all()],\n })\n", "path": "akvo/rest/views/report.py"}]} | 1,058 | 166 |
gh_patches_debug_18194 | rasdani/github-patches | git_diff | quantumlib__Cirq-5060 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Clifford optimize should either support more general pauli phases or make this an assert
In the code to merge Clifford operations there is code that only works if a pauli string coefficient is $\pm 1$. If not, it raises a NotImplementedError, on lines that are not covered by any test :(
My suspicion here is that at this point the phase can only be these two values, but someone will need to better understand this code and the associated TODO.
</issue>
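To make the coefficient constraint concrete, here is a minimal standalone sketch of the phase-folding step from `try_merge_clifford` in the listing below (the `quarter_turns *= int(merge_op.pauli_string.coefficient.real)` logic). The function name `fold_phase` is invented for this sketch and is not part of the Cirq API; the point is only that a coefficient of +1 or -1 folds cleanly into a number of quarter turns, while any other phase (for example 1j) has no such representation, which is what the NotImplementedError guards against.

```python
# Illustrative sketch only; fold_phase is not part of the Cirq API.
# A pauli-string coefficient of +1 or -1 folds cleanly into quarter turns,
# while any other phase (e.g. 1j) cannot be expressed this way at all.
def fold_phase(quarter_turns: int, coefficient: complex) -> int:
    if coefficient not in (1, -1):
        # The branch the issue suspects is unreachable in practice.
        raise NotImplementedError("Only +1/-1 pauli string coefficients supported")
    quarter_turns *= int(coefficient.real)  # +1 keeps, -1 negates the rotation
    return quarter_turns % 4                # normalise to 0..3 quarter turns

assert fold_phase(1, -1) == 3  # a negative quarter turn becomes three positive ones
assert fold_phase(2, 1) == 2
```

The golden diff further down takes the view stated in the issue, i.e. that the coefficient can only ever be ±1 at this point, and simply drops the guard.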
<code>
[start of cirq-core/cirq/contrib/paulistring/clifford_optimize.py]
1 # Copyright 2018 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import Tuple, cast
16
17 from cirq import ops, circuits
18 from cirq.contrib.paulistring.convert_gate_set import converted_gate_set
19
20
21 def clifford_optimized_circuit(circuit: circuits.Circuit, atol: float = 1e-8) -> circuits.Circuit:
22 # Convert to a circuit with SingleQubitCliffordGates,
23 # CZs and other ignored gates
24 c_cliff = converted_gate_set(circuit, no_clifford_gates=False, atol=atol)
25
26 all_ops = list(c_cliff.all_operations())
27
28 def find_merge_point(
29 start_i: int,
30 string_op: ops.PauliStringPhasor,
31 stop_at_cz: bool,
32 ) -> Tuple[int, ops.PauliStringPhasor, int]:
33 STOP = 0
34 CONTINUE = 1
35 SKIP = 2
36
37 def continue_condition(
38 op: ops.Operation, current_string: ops.PauliStringPhasor, is_first: bool
39 ) -> int:
40 if isinstance(op.gate, ops.SingleQubitCliffordGate):
41 return CONTINUE if len(current_string.pauli_string) != 1 else STOP
42 if isinstance(op.gate, ops.CZPowGate):
43 return STOP if stop_at_cz else CONTINUE
44 if (
45 isinstance(op, ops.PauliStringPhasor)
46 and len(op.qubits) == 1
47 and (op.pauli_string[op.qubits[0]] == current_string.pauli_string[op.qubits[0]])
48 ):
49 return SKIP
50 return STOP
51
52 modified_op = string_op
53 furthest_op = string_op
54 furthest_i = start_i + 1
55 num_passed_over = 0
56 for i in range(start_i + 1, len(all_ops)):
57 op = all_ops[i]
58 if not set(op.qubits) & set(modified_op.qubits):
59 # No qubits in common
60 continue
61 cont_cond = continue_condition(op, modified_op, i == start_i + 1)
62 if cont_cond == STOP:
63 if len(modified_op.pauli_string) == 1:
64 furthest_op = modified_op
65 furthest_i = i
66 break
67 if cont_cond == CONTINUE:
68 modified_op = modified_op.pass_operations_over([op], after_to_before=True)
69 num_passed_over += 1
70 if len(modified_op.pauli_string) == 1:
71 furthest_op = modified_op
72 furthest_i = i + 1
73
74 return furthest_i, furthest_op, num_passed_over
75
76 def try_merge_clifford(cliff_op: ops.GateOperation, start_i: int) -> bool:
77 (orig_qubit,) = cliff_op.qubits
78 remaining_cliff_gate = ops.SingleQubitCliffordGate.I
79 for pauli, quarter_turns in reversed(
80 cast(ops.SingleQubitCliffordGate, cliff_op.gate).decompose_rotation()
81 ):
82 trans = remaining_cliff_gate.transform(pauli)
83 pauli = trans.to
84 quarter_turns *= -1 if trans.flip else 1
85 string_op = ops.PauliStringPhasor(
86 ops.PauliString(pauli(cliff_op.qubits[0])), exponent_neg=quarter_turns / 2
87 )
88
89 merge_i, merge_op, num_passed = find_merge_point(start_i, string_op, quarter_turns == 2)
90 assert merge_i > start_i
91 assert len(merge_op.pauli_string) == 1, 'PauliString length != 1'
92
93 qubit, pauli = next(iter(merge_op.pauli_string.items()))
94 quarter_turns = round(merge_op.exponent_relative * 2)
95 if merge_op.pauli_string.coefficient not in [1, -1]:
96 # TODO: Add support for more general phases.
97 # Github issue: https://github.com/quantumlib/Cirq/issues/2962
98 # Legacy coverage ignore, we need test code that hits this.
99 # coverage: ignore
100 raise NotImplementedError(
101 'Only +1/-1 pauli string coefficients currently supported'
102 )
103 quarter_turns *= int(merge_op.pauli_string.coefficient.real)
104 quarter_turns %= 4
105 part_cliff_gate = ops.SingleQubitCliffordGate.from_quarter_turns(pauli, quarter_turns)
106
107 other_op = all_ops[merge_i] if merge_i < len(all_ops) else None
108 if other_op is not None and qubit not in set(other_op.qubits):
109 other_op = None
110
111 if isinstance(other_op, ops.GateOperation) and isinstance(
112 other_op.gate, ops.SingleQubitCliffordGate
113 ):
114 # Merge with another SingleQubitCliffordGate
115 new_op = part_cliff_gate.merged_with(other_op.gate)(qubit)
116 all_ops[merge_i] = new_op
117 elif (
118 isinstance(other_op, ops.GateOperation)
119 and isinstance(other_op.gate, ops.CZPowGate)
120 and other_op.gate.exponent == 1
121 and quarter_turns == 2
122 ):
123 # Pass whole Pauli gate over CZ, possibly adding a Z gate
124 if pauli != ops.pauli_gates.Z:
125 other_qubit = other_op.qubits[other_op.qubits.index(qubit) - 1]
126 all_ops.insert(merge_i + 1, ops.SingleQubitCliffordGate.Z(other_qubit))
127 all_ops.insert(merge_i + 1, part_cliff_gate(qubit))
128 elif isinstance(other_op, ops.PauliStringPhasor):
129 # Pass over a non-Clifford gate
130 mod_op = other_op.pass_operations_over([part_cliff_gate(qubit)])
131 all_ops[merge_i] = mod_op
132 all_ops.insert(merge_i + 1, part_cliff_gate(qubit))
133 elif merge_i > start_i + 1 and num_passed > 0:
134 # Moved Clifford through the circuit but nothing to merge
135 all_ops.insert(merge_i, part_cliff_gate(qubit))
136 else:
137 # Couldn't move Clifford
138 remaining_cliff_gate = remaining_cliff_gate.merged_with(part_cliff_gate)
139
140 if remaining_cliff_gate == ops.SingleQubitCliffordGate.I:
141 all_ops.pop(start_i)
142 return True
143 all_ops[start_i] = remaining_cliff_gate(orig_qubit)
144 return False
145
146 def try_merge_cz(cz_op: ops.GateOperation, start_i: int) -> int:
147 """Returns the number of operations removed at or before start_i."""
148 for i in reversed(range(start_i)):
149 op = all_ops[i]
150 if not set(cz_op.qubits) & set(op.qubits):
151 # Don't share qubits
152 # Keep looking
153 continue
154 elif not (
155 isinstance(op, ops.GateOperation)
156 and isinstance(op.gate, ops.CZPowGate)
157 and op.gate.exponent == 1
158 ):
159 # Not a CZ gate
160 return 0
161 elif cz_op == op:
162 # Cancel two CZ gates
163 all_ops.pop(start_i)
164 all_ops.pop(i)
165 return 2
166 else:
167 # Two CZ gates that share one qubit
168 # Pass through and keep looking
169 continue # coverage: ignore
170 # The above line is covered by test_remove_staggered_czs but the
171 # coverage checker disagrees.
172 return 0
173
174 i = 0
175 while i < len(all_ops):
176 op = all_ops[i]
177 if isinstance(op, ops.GateOperation) and isinstance(op.gate, ops.SingleQubitCliffordGate):
178 if try_merge_clifford(op, i):
179 i -= 1
180 elif (
181 isinstance(op, ops.GateOperation)
182 and isinstance(op.gate, ops.CZPowGate)
183 and op.gate.exponent == 1
184 ):
185 num_rm = try_merge_cz(op, i)
186 i -= num_rm
187 i += 1
188
189 return circuits.Circuit(all_ops, strategy=circuits.InsertStrategy.EARLIEST)
190
[end of cirq-core/cirq/contrib/paulistring/clifford_optimize.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cirq-core/cirq/contrib/paulistring/clifford_optimize.py b/cirq-core/cirq/contrib/paulistring/clifford_optimize.py
--- a/cirq-core/cirq/contrib/paulistring/clifford_optimize.py
+++ b/cirq-core/cirq/contrib/paulistring/clifford_optimize.py
@@ -92,14 +92,6 @@
qubit, pauli = next(iter(merge_op.pauli_string.items()))
quarter_turns = round(merge_op.exponent_relative * 2)
- if merge_op.pauli_string.coefficient not in [1, -1]:
- # TODO: Add support for more general phases.
- # Github issue: https://github.com/quantumlib/Cirq/issues/2962
- # Legacy coverage ignore, we need test code that hits this.
- # coverage: ignore
- raise NotImplementedError(
- 'Only +1/-1 pauli string coefficients currently supported'
- )
quarter_turns *= int(merge_op.pauli_string.coefficient.real)
quarter_turns %= 4
part_cliff_gate = ops.SingleQubitCliffordGate.from_quarter_turns(pauli, quarter_turns)
| {"golden_diff": "diff --git a/cirq-core/cirq/contrib/paulistring/clifford_optimize.py b/cirq-core/cirq/contrib/paulistring/clifford_optimize.py\n--- a/cirq-core/cirq/contrib/paulistring/clifford_optimize.py\n+++ b/cirq-core/cirq/contrib/paulistring/clifford_optimize.py\n@@ -92,14 +92,6 @@\n \n qubit, pauli = next(iter(merge_op.pauli_string.items()))\n quarter_turns = round(merge_op.exponent_relative * 2)\n- if merge_op.pauli_string.coefficient not in [1, -1]:\n- # TODO: Add support for more general phases.\n- # Github issue: https://github.com/quantumlib/Cirq/issues/2962\n- # Legacy coverage ignore, we need test code that hits this.\n- # coverage: ignore\n- raise NotImplementedError(\n- 'Only +1/-1 pauli string coefficients currently supported'\n- )\n quarter_turns *= int(merge_op.pauli_string.coefficient.real)\n quarter_turns %= 4\n part_cliff_gate = ops.SingleQubitCliffordGate.from_quarter_turns(pauli, quarter_turns)\n", "issue": "Clifford optimize either should support more general pauli phases or make assert\nIn the code to merge Clifford operations there is code that only works if a pauli string coefficient is $\\pm 1$. If not it raises a NotImplementError, lines that are not tested :(\r\n\r\nMy suspicion here is that at this point the phase can only be these two values, but someone will need to better understand this code and the associated TODO.\n", "before_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Tuple, cast\n\nfrom cirq import ops, circuits\nfrom cirq.contrib.paulistring.convert_gate_set import converted_gate_set\n\n\ndef clifford_optimized_circuit(circuit: circuits.Circuit, atol: float = 1e-8) -> circuits.Circuit:\n # Convert to a circuit with SingleQubitCliffordGates,\n # CZs and other ignored gates\n c_cliff = converted_gate_set(circuit, no_clifford_gates=False, atol=atol)\n\n all_ops = list(c_cliff.all_operations())\n\n def find_merge_point(\n start_i: int,\n string_op: ops.PauliStringPhasor,\n stop_at_cz: bool,\n ) -> Tuple[int, ops.PauliStringPhasor, int]:\n STOP = 0\n CONTINUE = 1\n SKIP = 2\n\n def continue_condition(\n op: ops.Operation, current_string: ops.PauliStringPhasor, is_first: bool\n ) -> int:\n if isinstance(op.gate, ops.SingleQubitCliffordGate):\n return CONTINUE if len(current_string.pauli_string) != 1 else STOP\n if isinstance(op.gate, ops.CZPowGate):\n return STOP if stop_at_cz else CONTINUE\n if (\n isinstance(op, ops.PauliStringPhasor)\n and len(op.qubits) == 1\n and (op.pauli_string[op.qubits[0]] == current_string.pauli_string[op.qubits[0]])\n ):\n return SKIP\n return STOP\n\n modified_op = string_op\n furthest_op = string_op\n furthest_i = start_i + 1\n num_passed_over = 0\n for i in range(start_i + 1, len(all_ops)):\n op = all_ops[i]\n if not set(op.qubits) & set(modified_op.qubits):\n # No qubits in common\n continue\n cont_cond = continue_condition(op, modified_op, i == start_i + 1)\n if cont_cond == STOP:\n if len(modified_op.pauli_string) == 1:\n furthest_op 
= modified_op\n furthest_i = i\n break\n if cont_cond == CONTINUE:\n modified_op = modified_op.pass_operations_over([op], after_to_before=True)\n num_passed_over += 1\n if len(modified_op.pauli_string) == 1:\n furthest_op = modified_op\n furthest_i = i + 1\n\n return furthest_i, furthest_op, num_passed_over\n\n def try_merge_clifford(cliff_op: ops.GateOperation, start_i: int) -> bool:\n (orig_qubit,) = cliff_op.qubits\n remaining_cliff_gate = ops.SingleQubitCliffordGate.I\n for pauli, quarter_turns in reversed(\n cast(ops.SingleQubitCliffordGate, cliff_op.gate).decompose_rotation()\n ):\n trans = remaining_cliff_gate.transform(pauli)\n pauli = trans.to\n quarter_turns *= -1 if trans.flip else 1\n string_op = ops.PauliStringPhasor(\n ops.PauliString(pauli(cliff_op.qubits[0])), exponent_neg=quarter_turns / 2\n )\n\n merge_i, merge_op, num_passed = find_merge_point(start_i, string_op, quarter_turns == 2)\n assert merge_i > start_i\n assert len(merge_op.pauli_string) == 1, 'PauliString length != 1'\n\n qubit, pauli = next(iter(merge_op.pauli_string.items()))\n quarter_turns = round(merge_op.exponent_relative * 2)\n if merge_op.pauli_string.coefficient not in [1, -1]:\n # TODO: Add support for more general phases.\n # Github issue: https://github.com/quantumlib/Cirq/issues/2962\n # Legacy coverage ignore, we need test code that hits this.\n # coverage: ignore\n raise NotImplementedError(\n 'Only +1/-1 pauli string coefficients currently supported'\n )\n quarter_turns *= int(merge_op.pauli_string.coefficient.real)\n quarter_turns %= 4\n part_cliff_gate = ops.SingleQubitCliffordGate.from_quarter_turns(pauli, quarter_turns)\n\n other_op = all_ops[merge_i] if merge_i < len(all_ops) else None\n if other_op is not None and qubit not in set(other_op.qubits):\n other_op = None\n\n if isinstance(other_op, ops.GateOperation) and isinstance(\n other_op.gate, ops.SingleQubitCliffordGate\n ):\n # Merge with another SingleQubitCliffordGate\n new_op = part_cliff_gate.merged_with(other_op.gate)(qubit)\n all_ops[merge_i] = new_op\n elif (\n isinstance(other_op, ops.GateOperation)\n and isinstance(other_op.gate, ops.CZPowGate)\n and other_op.gate.exponent == 1\n and quarter_turns == 2\n ):\n # Pass whole Pauli gate over CZ, possibly adding a Z gate\n if pauli != ops.pauli_gates.Z:\n other_qubit = other_op.qubits[other_op.qubits.index(qubit) - 1]\n all_ops.insert(merge_i + 1, ops.SingleQubitCliffordGate.Z(other_qubit))\n all_ops.insert(merge_i + 1, part_cliff_gate(qubit))\n elif isinstance(other_op, ops.PauliStringPhasor):\n # Pass over a non-Clifford gate\n mod_op = other_op.pass_operations_over([part_cliff_gate(qubit)])\n all_ops[merge_i] = mod_op\n all_ops.insert(merge_i + 1, part_cliff_gate(qubit))\n elif merge_i > start_i + 1 and num_passed > 0:\n # Moved Clifford through the circuit but nothing to merge\n all_ops.insert(merge_i, part_cliff_gate(qubit))\n else:\n # Couldn't move Clifford\n remaining_cliff_gate = remaining_cliff_gate.merged_with(part_cliff_gate)\n\n if remaining_cliff_gate == ops.SingleQubitCliffordGate.I:\n all_ops.pop(start_i)\n return True\n all_ops[start_i] = remaining_cliff_gate(orig_qubit)\n return False\n\n def try_merge_cz(cz_op: ops.GateOperation, start_i: int) -> int:\n \"\"\"Returns the number of operations removed at or before start_i.\"\"\"\n for i in reversed(range(start_i)):\n op = all_ops[i]\n if not set(cz_op.qubits) & set(op.qubits):\n # Don't share qubits\n # Keep looking\n continue\n elif not (\n isinstance(op, ops.GateOperation)\n and isinstance(op.gate, ops.CZPowGate)\n 
and op.gate.exponent == 1\n ):\n # Not a CZ gate\n return 0\n elif cz_op == op:\n # Cancel two CZ gates\n all_ops.pop(start_i)\n all_ops.pop(i)\n return 2\n else:\n # Two CZ gates that share one qubit\n # Pass through and keep looking\n continue # coverage: ignore\n # The above line is covered by test_remove_staggered_czs but the\n # coverage checker disagrees.\n return 0\n\n i = 0\n while i < len(all_ops):\n op = all_ops[i]\n if isinstance(op, ops.GateOperation) and isinstance(op.gate, ops.SingleQubitCliffordGate):\n if try_merge_clifford(op, i):\n i -= 1\n elif (\n isinstance(op, ops.GateOperation)\n and isinstance(op.gate, ops.CZPowGate)\n and op.gate.exponent == 1\n ):\n num_rm = try_merge_cz(op, i)\n i -= num_rm\n i += 1\n\n return circuits.Circuit(all_ops, strategy=circuits.InsertStrategy.EARLIEST)\n", "path": "cirq-core/cirq/contrib/paulistring/clifford_optimize.py"}]} | 3,046 | 275 |
gh_patches_debug_11850 | rasdani/github-patches | git_diff | cobbler__cobbler-3397 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Backport] GHSA-mcg6-h362-cmq5
### Original feature issue
- Advisory: https://github.com/advisories/GHSA-mcg6-h362-cmq5
### Target release
- [ ] release33
- [x] release32
- [ ] release30
### Reason
Stabilization of Cobbler 3.2.x in the Fedora Ecosystem
</issue>
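The change being backported touches the PAM flow in the module shown below: `pam_authenticate()` only checks the credentials, while `pam_acct_mgmt()` is the call that rejects expired, locked or otherwise invalid accounts, and the golden diff further down adds exactly that second call. The following is a condensed sketch of the patched call sequence, assuming the module-level ctypes bindings (`PAM_AUTHENTICATE`, plus the `PAM_ACCT_MGMT` binding introduced by the patch) are in place; it is illustrative, not an exact excerpt.

```python
def authenticate_and_check_account(handle) -> bool:
    """Sketch of the patched flow: credential check first, then account validity."""
    retval = PAM_AUTHENTICATE(handle, 0)   # verifies username/password only
    if retval == 0:
        retval = PAM_ACCT_MGMT(handle, 0)  # rejects expired or locked accounts
    return retval == 0
```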
<code>
[start of cobbler/modules/authentication/pam.py]
1 """
2 Authentication module that uses /etc/cobbler/auth.conf
3 Choice of authentication module is in /etc/cobbler/modules.conf
4
5 Copyright 2007-2009, Red Hat, Inc and Others
6 Michael DeHaan <michael.dehaan AT gmail>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 02110-1301 USA
22
23 PAM python code based on the pam_python code created by Chris AtLee:
24 http://atlee.ca/software/pam/
25
26 #-----------------------------------------------
27 pam_python (c) 2007 Chris AtLee <[email protected]>
28 Licensed under the MIT license:
29 http://www.opensource.org/licenses/mit-license.php
30
31 PAM module for python
32
33 Provides an authenticate function that will allow the caller to authenticate
34 a user against the Pluggable Authentication Modules (PAM) on the system.
35
36 Implemented using ctypes, so no compilation is necessary.
37 """
38
39 from ctypes import CDLL, POINTER, Structure, CFUNCTYPE, cast, pointer, sizeof
40 from ctypes import c_void_p, c_uint, c_char_p, c_char, c_int
41 from ctypes.util import find_library
42
43 LIBPAM = CDLL(find_library("pam"))
44 LIBC = CDLL(find_library("c"))
45
46 CALLOC = LIBC.calloc
47 CALLOC.restype = c_void_p
48 CALLOC.argtypes = [c_uint, c_uint]
49
50 STRDUP = LIBC.strdup
51 STRDUP.argstypes = [c_char_p]
52 STRDUP.restype = POINTER(c_char) # NOT c_char_p !!!!
53
54 # Various constants
55 PAM_PROMPT_ECHO_OFF = 1
56 PAM_PROMPT_ECHO_ON = 2
57 PAM_ERROR_MSG = 3
58 PAM_TEXT_INFO = 4
59
60
61 def register() -> str:
62 """
63 The mandatory Cobbler module registration hook.
64 """
65 return "authn"
66
67
68 class PamHandle(Structure):
69 """
70 wrapper class for pam_handle_t
71 """
72 _fields_ = [("handle", c_void_p)]
73
74 def __init__(self):
75 Structure.__init__(self)
76 self.handle = 0
77
78
79 class PamMessage(Structure):
80 """
81 wrapper class for pam_message structure
82 """
83 _fields_ = [("msg_style", c_int), ("msg", c_char_p)]
84
85 def __repr__(self):
86 return "<PamMessage %i '%s'>" % (self.msg_style, self.msg)
87
88
89 class PamResponse(Structure):
90 """
91 wrapper class for pam_response structure
92 """
93 _fields_ = [("resp", c_char_p), ("resp_retcode", c_int)]
94
95 def __repr__(self):
96 return "<PamResponse %i '%s'>" % (self.resp_retcode, self.resp)
97
98
99 CONV_FUNC = CFUNCTYPE(c_int, c_int, POINTER(POINTER(PamMessage)), POINTER(POINTER(PamResponse)), c_void_p)
100
101
102 class PamConv(Structure):
103 """
104 wrapper class for pam_conv structure
105 """
106 _fields_ = [("conv", CONV_FUNC), ("appdata_ptr", c_void_p)]
107
108
109 PAM_START = LIBPAM.pam_start
110 PAM_START.restype = c_int
111 PAM_START.argtypes = [c_char_p, c_char_p, POINTER(PamConv), POINTER(PamHandle)]
112
113 PAM_AUTHENTICATE = LIBPAM.pam_authenticate
114 PAM_AUTHENTICATE.restype = c_int
115 PAM_AUTHENTICATE.argtypes = [PamHandle, c_int]
116
117
118 def authenticate(api_handle, username: str, password: str) -> bool:
119 """
120 :param api_handle: Used for resolving the the pam service name and getting the Logger.
121 :param username:The username to log in with.
122 :param password: The password to log in with.
123 :returns: True if the given username and password authenticate for the given service. Otherwise False
124 """
125
126 @CONV_FUNC
127 def my_conv(n_messages, messages, p_response, app_data):
128 """
129 Simple conversation function that responds to any prompt where the echo is off with the supplied password
130 """
131 # Create an array of n_messages response objects
132 addr = CALLOC(n_messages, sizeof(PamResponse))
133 p_response[0] = cast(addr, POINTER(PamResponse))
134 for i in range(n_messages):
135 if messages[i].contents.msg_style == PAM_PROMPT_ECHO_OFF:
136 pw_copy = STRDUP(password.encode())
137 p_response.contents[i].resp = cast(pw_copy, c_char_p)
138 p_response.contents[i].resp_retcode = 0
139 return 0
140
141 try:
142 service = api_handle.settings().authn_pam_service
143 except:
144 service = 'login'
145
146 api_handle.logger.debug("authn_pam: PAM service is %s" % service)
147
148 handle = PamHandle()
149 conv = PamConv(my_conv, 0)
150 retval = PAM_START(service.encode(), username.encode(), pointer(conv), pointer(handle))
151
152 if retval != 0:
153 # TODO: This is not an authentication error, something has gone wrong starting up PAM
154 api_handle.logger.error("authn_pam: error initializing PAM library")
155 return False
156
157 retval = PAM_AUTHENTICATE(handle, 0)
158 return retval == 0
159
[end of cobbler/modules/authentication/pam.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cobbler/modules/authentication/pam.py b/cobbler/modules/authentication/pam.py
--- a/cobbler/modules/authentication/pam.py
+++ b/cobbler/modules/authentication/pam.py
@@ -114,6 +114,10 @@
PAM_AUTHENTICATE.restype = c_int
PAM_AUTHENTICATE.argtypes = [PamHandle, c_int]
+PAM_ACCT_MGMT = LIBPAM.pam_acct_mgmt
+PAM_ACCT_MGMT.restype = c_int
+PAM_ACCT_MGMT.argtypes = [PamHandle, c_int]
+
def authenticate(api_handle, username: str, password: str) -> bool:
"""
@@ -155,4 +159,8 @@
return False
retval = PAM_AUTHENTICATE(handle, 0)
+
+ if retval == 0:
+ retval = PAM_ACCT_MGMT(handle, 0)
+
return retval == 0
| {"golden_diff": "diff --git a/cobbler/modules/authentication/pam.py b/cobbler/modules/authentication/pam.py\n--- a/cobbler/modules/authentication/pam.py\n+++ b/cobbler/modules/authentication/pam.py\n@@ -114,6 +114,10 @@\n PAM_AUTHENTICATE.restype = c_int\n PAM_AUTHENTICATE.argtypes = [PamHandle, c_int]\n \n+PAM_ACCT_MGMT = LIBPAM.pam_acct_mgmt\n+PAM_ACCT_MGMT.restype = c_int\n+PAM_ACCT_MGMT.argtypes = [PamHandle, c_int]\n+\n \n def authenticate(api_handle, username: str, password: str) -> bool:\n \"\"\"\n@@ -155,4 +159,8 @@\n return False\n \n retval = PAM_AUTHENTICATE(handle, 0)\n+\n+ if retval == 0:\n+ retval = PAM_ACCT_MGMT(handle, 0)\n+\n return retval == 0\n", "issue": "[Backport] GHSA-mcg6-h362-cmq5\n### Original feature issue\r\n\r\n- Advisory: https://github.com/advisories/GHSA-mcg6-h362-cmq5\r\n\r\n### Target release\r\n\r\n- [ ] release33\r\n- [x] release32\r\n- [ ] release30\r\n\r\n### Reason\r\n\r\nStabilization of Cobbler 3.2.x in the Fedora Ecosystem\r\n\n", "before_files": [{"content": "\"\"\"\nAuthentication module that uses /etc/cobbler/auth.conf\nChoice of authentication module is in /etc/cobbler/modules.conf\n\nCopyright 2007-2009, Red Hat, Inc and Others\nMichael DeHaan <michael.dehaan AT gmail>\n\nThis program is free software; you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation; either version 2 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA\n02110-1301 USA\n\nPAM python code based on the pam_python code created by Chris AtLee:\nhttp://atlee.ca/software/pam/\n\n#-----------------------------------------------\npam_python (c) 2007 Chris AtLee <[email protected]>\nLicensed under the MIT license:\nhttp://www.opensource.org/licenses/mit-license.php\n\nPAM module for python\n\nProvides an authenticate function that will allow the caller to authenticate\na user against the Pluggable Authentication Modules (PAM) on the system.\n\nImplemented using ctypes, so no compilation is necessary.\n\"\"\"\n\nfrom ctypes import CDLL, POINTER, Structure, CFUNCTYPE, cast, pointer, sizeof\nfrom ctypes import c_void_p, c_uint, c_char_p, c_char, c_int\nfrom ctypes.util import find_library\n\nLIBPAM = CDLL(find_library(\"pam\"))\nLIBC = CDLL(find_library(\"c\"))\n\nCALLOC = LIBC.calloc\nCALLOC.restype = c_void_p\nCALLOC.argtypes = [c_uint, c_uint]\n\nSTRDUP = LIBC.strdup\nSTRDUP.argstypes = [c_char_p]\nSTRDUP.restype = POINTER(c_char) # NOT c_char_p !!!!\n\n# Various constants\nPAM_PROMPT_ECHO_OFF = 1\nPAM_PROMPT_ECHO_ON = 2\nPAM_ERROR_MSG = 3\nPAM_TEXT_INFO = 4\n\n\ndef register() -> str:\n \"\"\"\n The mandatory Cobbler module registration hook.\n \"\"\"\n return \"authn\"\n\n\nclass PamHandle(Structure):\n \"\"\"\n wrapper class for pam_handle_t\n \"\"\"\n _fields_ = [(\"handle\", c_void_p)]\n\n def __init__(self):\n Structure.__init__(self)\n self.handle = 0\n\n\nclass PamMessage(Structure):\n \"\"\"\n wrapper class for pam_message structure\n \"\"\"\n _fields_ = [(\"msg_style\", c_int), (\"msg\", c_char_p)]\n\n def __repr__(self):\n return \"<PamMessage %i '%s'>\" % 
(self.msg_style, self.msg)\n\n\nclass PamResponse(Structure):\n \"\"\"\n wrapper class for pam_response structure\n \"\"\"\n _fields_ = [(\"resp\", c_char_p), (\"resp_retcode\", c_int)]\n\n def __repr__(self):\n return \"<PamResponse %i '%s'>\" % (self.resp_retcode, self.resp)\n\n\nCONV_FUNC = CFUNCTYPE(c_int, c_int, POINTER(POINTER(PamMessage)), POINTER(POINTER(PamResponse)), c_void_p)\n\n\nclass PamConv(Structure):\n \"\"\"\n wrapper class for pam_conv structure\n \"\"\"\n _fields_ = [(\"conv\", CONV_FUNC), (\"appdata_ptr\", c_void_p)]\n\n\nPAM_START = LIBPAM.pam_start\nPAM_START.restype = c_int\nPAM_START.argtypes = [c_char_p, c_char_p, POINTER(PamConv), POINTER(PamHandle)]\n\nPAM_AUTHENTICATE = LIBPAM.pam_authenticate\nPAM_AUTHENTICATE.restype = c_int\nPAM_AUTHENTICATE.argtypes = [PamHandle, c_int]\n\n\ndef authenticate(api_handle, username: str, password: str) -> bool:\n \"\"\"\n :param api_handle: Used for resolving the the pam service name and getting the Logger.\n :param username:The username to log in with.\n :param password: The password to log in with.\n :returns: True if the given username and password authenticate for the given service. Otherwise False\n \"\"\"\n\n @CONV_FUNC\n def my_conv(n_messages, messages, p_response, app_data):\n \"\"\"\n Simple conversation function that responds to any prompt where the echo is off with the supplied password\n \"\"\"\n # Create an array of n_messages response objects\n addr = CALLOC(n_messages, sizeof(PamResponse))\n p_response[0] = cast(addr, POINTER(PamResponse))\n for i in range(n_messages):\n if messages[i].contents.msg_style == PAM_PROMPT_ECHO_OFF:\n pw_copy = STRDUP(password.encode())\n p_response.contents[i].resp = cast(pw_copy, c_char_p)\n p_response.contents[i].resp_retcode = 0\n return 0\n\n try:\n service = api_handle.settings().authn_pam_service\n except:\n service = 'login'\n\n api_handle.logger.debug(\"authn_pam: PAM service is %s\" % service)\n\n handle = PamHandle()\n conv = PamConv(my_conv, 0)\n retval = PAM_START(service.encode(), username.encode(), pointer(conv), pointer(handle))\n\n if retval != 0:\n # TODO: This is not an authentication error, something has gone wrong starting up PAM\n api_handle.logger.error(\"authn_pam: error initializing PAM library\")\n return False\n\n retval = PAM_AUTHENTICATE(handle, 0)\n return retval == 0\n", "path": "cobbler/modules/authentication/pam.py"}]} | 2,295 | 218 |
gh_patches_debug_17259 | rasdani/github-patches | git_diff | encode__httpx-629 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
HTTP/2 download speeds / flow control settings
While having an initial look at HTTP/3, @jlaine dug out that we seem to have significantly slow uploading right now.
First thing to do would be to investigate and replicate, e.g. by comparing and contrasting a simple upload from `httpx` vs `requests/urllib3` - does it replicate trivially and reliably? This really *shouldn't* be an issue for us, so it will need some looking at.
Initial thoughts on where issues could be:
* We're sending too-small chunks, and always waiting for the network to fully drain on each chunk is negatively impacting performance.
* We're sending too-small chunks, and ending up with largish computation overhead relative to data transfer as a result.
* We've got some big unseen overhead in the sync-to-async marshalling.
* Something else?
</issue>
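The replication step suggested above can be scripted with a rough timing harness such as the one below. This is only a sketch: the upload URL is a placeholder, and depending on the httpx version the raw-bytes keyword may be `data=` (older releases) or `content=` (newer releases).

```python
import time

import httpx
import requests

URL = "http://localhost:8000/upload"  # placeholder endpoint; point it at a real server
PAYLOAD = b"x" * (50 * 1024 * 1024)   # 50 MB of dummy data


def timed(label, fn):
    start = time.perf_counter()
    fn()
    elapsed = time.perf_counter() - start
    print(f"{label}: {len(PAYLOAD) / elapsed / 1e6:.1f} MB/s")


timed("requests", lambda: requests.post(URL, data=PAYLOAD))  # urllib3 baseline
timed("httpx", lambda: httpx.post(URL, data=PAYLOAD))        # httpx under test
```

If the slowdown only appears over HTTP/2, note that `wait_for_outgoing_flow` in the listing below computes the per-write budget as `min(local_flow, self.state.max_outbound_frame_size)`, i.e. it is capped by the maximum frame size (16 KB by default) rather than by the connection-level flow-control window, which is one concrete place where too-small chunks could originate.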
<code>
[start of httpx/dispatch/http2.py]
1 import typing
2
3 import h2.connection
4 import h2.events
5 from h2.settings import SettingCodes, Settings
6
7 from ..concurrency.base import (
8 BaseEvent,
9 BaseSocketStream,
10 ConcurrencyBackend,
11 lookup_backend,
12 )
13 from ..config import Timeout
14 from ..exceptions import ProtocolError
15 from ..models import Request, Response
16 from ..utils import get_logger
17 from .base import OpenConnection
18
19 logger = get_logger(__name__)
20
21
22 class HTTP2Connection(OpenConnection):
23 READ_NUM_BYTES = 4096
24
25 def __init__(
26 self,
27 socket: BaseSocketStream,
28 backend: typing.Union[str, ConcurrencyBackend] = "auto",
29 on_release: typing.Callable = None,
30 ):
31 self.socket = socket
32 self.backend = lookup_backend(backend)
33 self.on_release = on_release
34 self.state = h2.connection.H2Connection()
35
36 self.streams = {} # type: typing.Dict[int, HTTP2Stream]
37 self.events = {} # type: typing.Dict[int, typing.List[h2.events.Event]]
38
39 self.init_started = False
40
41 @property
42 def is_http2(self) -> bool:
43 return True
44
45 @property
46 def init_complete(self) -> BaseEvent:
47 # We do this lazily, to make sure backend autodetection always
48 # runs within an async context.
49 if not hasattr(self, "_initialization_complete"):
50 self._initialization_complete = self.backend.create_event()
51 return self._initialization_complete
52
53 async def send(self, request: Request, timeout: Timeout = None) -> Response:
54 timeout = Timeout() if timeout is None else timeout
55
56 if not self.init_started:
57 # The very first stream is responsible for initiating the connection.
58 self.init_started = True
59 await self.send_connection_init(timeout)
60 stream_id = self.state.get_next_available_stream_id()
61 self.init_complete.set()
62 else:
63 # All other streams need to wait until the connection is established.
64 await self.init_complete.wait()
65 stream_id = self.state.get_next_available_stream_id()
66
67 stream = HTTP2Stream(stream_id=stream_id, connection=self)
68 self.streams[stream_id] = stream
69 self.events[stream_id] = []
70 return await stream.send(request, timeout)
71
72 async def send_connection_init(self, timeout: Timeout) -> None:
73 """
74 The HTTP/2 connection requires some initial setup before we can start
75 using individual request/response streams on it.
76 """
77
78 # Need to set these manually here instead of manipulating via
79 # __setitem__() otherwise the H2Connection will emit SettingsUpdate
80 # frames in addition to sending the undesired defaults.
81 self.state.local_settings = Settings(
82 client=True,
83 initial_values={
84 # Disable PUSH_PROMISE frames from the server since we don't do anything
85 # with them for now. Maybe when we support caching?
86 SettingCodes.ENABLE_PUSH: 0,
87 # These two are taken from h2 for safe defaults
88 SettingCodes.MAX_CONCURRENT_STREAMS: 100,
89 SettingCodes.MAX_HEADER_LIST_SIZE: 65536,
90 },
91 )
92
93 # Some websites (*cough* Yahoo *cough*) balk at this setting being
94 # present in the initial handshake since it's not defined in the original
95 # RFC despite the RFC mandating ignoring settings you don't know about.
96 del self.state.local_settings[h2.settings.SettingCodes.ENABLE_CONNECT_PROTOCOL]
97
98 self.state.initiate_connection()
99 data_to_send = self.state.data_to_send()
100 await self.socket.write(data_to_send, timeout)
101
102 @property
103 def is_closed(self) -> bool:
104 return False
105
106 def is_connection_dropped(self) -> bool:
107 return self.socket.is_connection_dropped()
108
109 async def close(self) -> None:
110 await self.socket.close()
111
112 async def wait_for_outgoing_flow(self, stream_id: int, timeout: Timeout) -> int:
113 """
114 Returns the maximum allowable outgoing flow for a given stream.
115
116 If the allowable flow is zero, then waits on the network until
117 WindowUpdated frames have increased the flow rate.
118
119 https://tools.ietf.org/html/rfc7540#section-6.9
120 """
121 local_flow = self.state.local_flow_control_window(stream_id)
122 connection_flow = self.state.max_outbound_frame_size
123 flow = min(local_flow, connection_flow)
124 while flow == 0:
125 await self.receive_events(timeout)
126 local_flow = self.state.local_flow_control_window(stream_id)
127 connection_flow = self.state.max_outbound_frame_size
128 flow = min(local_flow, connection_flow)
129 return flow
130
131 async def wait_for_event(self, stream_id: int, timeout: Timeout) -> h2.events.Event:
132 """
133 Returns the next event for a given stream.
134
135 If no events are available yet, then waits on the network until
136 an event is available.
137 """
138 while not self.events[stream_id]:
139 await self.receive_events(timeout)
140 return self.events[stream_id].pop(0)
141
142 async def receive_events(self, timeout: Timeout) -> None:
143 """
144 Read some data from the network, and update the H2 state.
145 """
146 data = await self.socket.read(self.READ_NUM_BYTES, timeout)
147 events = self.state.receive_data(data)
148 for event in events:
149 event_stream_id = getattr(event, "stream_id", 0)
150 logger.trace(f"receive_event stream_id={event_stream_id} event={event!r}")
151
152 if hasattr(event, "error_code"):
153 raise ProtocolError(event)
154
155 if event_stream_id in self.events:
156 self.events[event_stream_id].append(event)
157
158 data_to_send = self.state.data_to_send()
159 await self.socket.write(data_to_send, timeout)
160
161 async def send_headers(
162 self,
163 stream_id: int,
164 headers: typing.List[typing.Tuple[bytes, bytes]],
165 timeout: Timeout,
166 ) -> None:
167 self.state.send_headers(stream_id, headers)
168 data_to_send = self.state.data_to_send()
169 await self.socket.write(data_to_send, timeout)
170
171 async def send_data(self, stream_id: int, chunk: bytes, timeout: Timeout) -> None:
172 self.state.send_data(stream_id, chunk)
173 data_to_send = self.state.data_to_send()
174 await self.socket.write(data_to_send, timeout)
175
176 async def end_stream(self, stream_id: int, timeout: Timeout) -> None:
177 self.state.end_stream(stream_id)
178 data_to_send = self.state.data_to_send()
179 await self.socket.write(data_to_send, timeout)
180
181 async def acknowledge_received_data(
182 self, stream_id: int, amount: int, timeout: Timeout
183 ) -> None:
184 self.state.acknowledge_received_data(amount, stream_id)
185 data_to_send = self.state.data_to_send()
186 await self.socket.write(data_to_send, timeout)
187
188 async def close_stream(self, stream_id: int) -> None:
189 del self.streams[stream_id]
190 del self.events[stream_id]
191
192 if not self.streams and self.on_release is not None:
193 await self.on_release()
194
195
196 class HTTP2Stream:
197 def __init__(self, stream_id: int, connection: HTTP2Connection) -> None:
198 self.stream_id = stream_id
199 self.connection = connection
200
201 async def send(self, request: Request, timeout: Timeout) -> Response:
202 # Send the request.
203 await self.send_headers(request, timeout)
204 await self.send_body(request, timeout)
205
206 # Receive the response.
207 status_code, headers = await self.receive_response(timeout)
208 content = self.body_iter(timeout)
209 return Response(
210 status_code=status_code,
211 http_version="HTTP/2",
212 headers=headers,
213 content=content,
214 on_close=self.close,
215 request=request,
216 )
217
218 async def send_headers(self, request: Request, timeout: Timeout) -> None:
219 headers = [
220 (b":method", request.method.encode("ascii")),
221 (b":authority", request.url.authority.encode("ascii")),
222 (b":scheme", request.url.scheme.encode("ascii")),
223 (b":path", request.url.full_path.encode("ascii")),
224 ] + [(k, v) for k, v in request.headers.raw if k != b"host"]
225
226 logger.trace(
227 f"send_headers "
228 f"stream_id={self.stream_id} "
229 f"method={request.method!r} "
230 f"target={request.url.full_path!r} "
231 f"headers={headers!r}"
232 )
233 await self.connection.send_headers(self.stream_id, headers, timeout)
234
235 async def send_body(self, request: Request, timeout: Timeout) -> None:
236 logger.trace(f"send_body stream_id={self.stream_id}")
237 async for data in request.stream():
238 while data:
239 max_flow = await self.connection.wait_for_outgoing_flow(
240 self.stream_id, timeout
241 )
242 chunk_size = min(len(data), max_flow)
243 chunk, data = data[:chunk_size], data[chunk_size:]
244 await self.connection.send_data(self.stream_id, chunk, timeout)
245
246 await self.connection.end_stream(self.stream_id, timeout)
247
248 async def receive_response(
249 self, timeout: Timeout
250 ) -> typing.Tuple[int, typing.List[typing.Tuple[bytes, bytes]]]:
251 """
252 Read the response status and headers from the network.
253 """
254 while True:
255 event = await self.connection.wait_for_event(self.stream_id, timeout)
256 if isinstance(event, h2.events.ResponseReceived):
257 break
258
259 status_code = 200
260 headers = []
261 for k, v in event.headers:
262 if k == b":status":
263 status_code = int(v.decode("ascii", errors="ignore"))
264 elif not k.startswith(b":"):
265 headers.append((k, v))
266
267 return (status_code, headers)
268
269 async def body_iter(self, timeout: Timeout) -> typing.AsyncIterator[bytes]:
270 while True:
271 event = await self.connection.wait_for_event(self.stream_id, timeout)
272 if isinstance(event, h2.events.DataReceived):
273 amount = event.flow_controlled_length
274 await self.connection.acknowledge_received_data(
275 self.stream_id, amount, timeout
276 )
277 yield event.data
278 elif isinstance(event, (h2.events.StreamEnded, h2.events.StreamReset)):
279 break
280
281 async def close(self) -> None:
282 await self.connection.close_stream(self.stream_id)
283
[end of httpx/dispatch/http2.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/httpx/dispatch/http2.py b/httpx/dispatch/http2.py
--- a/httpx/dispatch/http2.py
+++ b/httpx/dispatch/http2.py
@@ -96,6 +96,7 @@
del self.state.local_settings[h2.settings.SettingCodes.ENABLE_CONNECT_PROTOCOL]
self.state.initiate_connection()
+ self.state.increment_flow_control_window(2 ** 24)
data_to_send = self.state.data_to_send()
await self.socket.write(data_to_send, timeout)
@@ -165,6 +166,7 @@
timeout: Timeout,
) -> None:
self.state.send_headers(stream_id, headers)
+ self.state.increment_flow_control_window(2 ** 24, stream_id=stream_id)
data_to_send = self.state.data_to_send()
await self.socket.write(data_to_send, timeout)
| {"golden_diff": "diff --git a/httpx/dispatch/http2.py b/httpx/dispatch/http2.py\n--- a/httpx/dispatch/http2.py\n+++ b/httpx/dispatch/http2.py\n@@ -96,6 +96,7 @@\n del self.state.local_settings[h2.settings.SettingCodes.ENABLE_CONNECT_PROTOCOL]\n \n self.state.initiate_connection()\n+ self.state.increment_flow_control_window(2 ** 24)\n data_to_send = self.state.data_to_send()\n await self.socket.write(data_to_send, timeout)\n \n@@ -165,6 +166,7 @@\n timeout: Timeout,\n ) -> None:\n self.state.send_headers(stream_id, headers)\n+ self.state.increment_flow_control_window(2 ** 24, stream_id=stream_id)\n data_to_send = self.state.data_to_send()\n await self.socket.write(data_to_send, timeout)\n", "issue": "HTTP/2 download speeds / flow control settings\nWhich having an initial look at HTTP/3, @jlaine dug out that we seem to have signiifcantly slow uploading right now.\r\n\r\nFirst thing to do would be to investigate and replicate, by eg. compare and contrast a simple upload from `httpx` vs `requests/urllib3` - does it replicate trivially and reliably? This really *shouldn't* be an issue for us so will need some looking at.\r\n\r\nInitial thoughts on where issues could be:\r\n\r\n* We're sending too-small chunks, and always waiting for the network to fully drain on each chunk is negatively impacting performance.\r\n* We're sending too-small chunks, and ending up with largish computation overhead relative to data transfer as a result.\r\n* We've got some big unseen overhead in the sync-to-async marshalling.\r\n* Something else?\n", "before_files": [{"content": "import typing\n\nimport h2.connection\nimport h2.events\nfrom h2.settings import SettingCodes, Settings\n\nfrom ..concurrency.base import (\n BaseEvent,\n BaseSocketStream,\n ConcurrencyBackend,\n lookup_backend,\n)\nfrom ..config import Timeout\nfrom ..exceptions import ProtocolError\nfrom ..models import Request, Response\nfrom ..utils import get_logger\nfrom .base import OpenConnection\n\nlogger = get_logger(__name__)\n\n\nclass HTTP2Connection(OpenConnection):\n READ_NUM_BYTES = 4096\n\n def __init__(\n self,\n socket: BaseSocketStream,\n backend: typing.Union[str, ConcurrencyBackend] = \"auto\",\n on_release: typing.Callable = None,\n ):\n self.socket = socket\n self.backend = lookup_backend(backend)\n self.on_release = on_release\n self.state = h2.connection.H2Connection()\n\n self.streams = {} # type: typing.Dict[int, HTTP2Stream]\n self.events = {} # type: typing.Dict[int, typing.List[h2.events.Event]]\n\n self.init_started = False\n\n @property\n def is_http2(self) -> bool:\n return True\n\n @property\n def init_complete(self) -> BaseEvent:\n # We do this lazily, to make sure backend autodetection always\n # runs within an async context.\n if not hasattr(self, \"_initialization_complete\"):\n self._initialization_complete = self.backend.create_event()\n return self._initialization_complete\n\n async def send(self, request: Request, timeout: Timeout = None) -> Response:\n timeout = Timeout() if timeout is None else timeout\n\n if not self.init_started:\n # The very first stream is responsible for initiating the connection.\n self.init_started = True\n await self.send_connection_init(timeout)\n stream_id = self.state.get_next_available_stream_id()\n self.init_complete.set()\n else:\n # All other streams need to wait until the connection is established.\n await self.init_complete.wait()\n stream_id = self.state.get_next_available_stream_id()\n\n stream = HTTP2Stream(stream_id=stream_id, connection=self)\n self.streams[stream_id] = 
stream\n self.events[stream_id] = []\n return await stream.send(request, timeout)\n\n async def send_connection_init(self, timeout: Timeout) -> None:\n \"\"\"\n The HTTP/2 connection requires some initial setup before we can start\n using individual request/response streams on it.\n \"\"\"\n\n # Need to set these manually here instead of manipulating via\n # __setitem__() otherwise the H2Connection will emit SettingsUpdate\n # frames in addition to sending the undesired defaults.\n self.state.local_settings = Settings(\n client=True,\n initial_values={\n # Disable PUSH_PROMISE frames from the server since we don't do anything\n # with them for now. Maybe when we support caching?\n SettingCodes.ENABLE_PUSH: 0,\n # These two are taken from h2 for safe defaults\n SettingCodes.MAX_CONCURRENT_STREAMS: 100,\n SettingCodes.MAX_HEADER_LIST_SIZE: 65536,\n },\n )\n\n # Some websites (*cough* Yahoo *cough*) balk at this setting being\n # present in the initial handshake since it's not defined in the original\n # RFC despite the RFC mandating ignoring settings you don't know about.\n del self.state.local_settings[h2.settings.SettingCodes.ENABLE_CONNECT_PROTOCOL]\n\n self.state.initiate_connection()\n data_to_send = self.state.data_to_send()\n await self.socket.write(data_to_send, timeout)\n\n @property\n def is_closed(self) -> bool:\n return False\n\n def is_connection_dropped(self) -> bool:\n return self.socket.is_connection_dropped()\n\n async def close(self) -> None:\n await self.socket.close()\n\n async def wait_for_outgoing_flow(self, stream_id: int, timeout: Timeout) -> int:\n \"\"\"\n Returns the maximum allowable outgoing flow for a given stream.\n\n If the allowable flow is zero, then waits on the network until\n WindowUpdated frames have increased the flow rate.\n\n https://tools.ietf.org/html/rfc7540#section-6.9\n \"\"\"\n local_flow = self.state.local_flow_control_window(stream_id)\n connection_flow = self.state.max_outbound_frame_size\n flow = min(local_flow, connection_flow)\n while flow == 0:\n await self.receive_events(timeout)\n local_flow = self.state.local_flow_control_window(stream_id)\n connection_flow = self.state.max_outbound_frame_size\n flow = min(local_flow, connection_flow)\n return flow\n\n async def wait_for_event(self, stream_id: int, timeout: Timeout) -> h2.events.Event:\n \"\"\"\n Returns the next event for a given stream.\n\n If no events are available yet, then waits on the network until\n an event is available.\n \"\"\"\n while not self.events[stream_id]:\n await self.receive_events(timeout)\n return self.events[stream_id].pop(0)\n\n async def receive_events(self, timeout: Timeout) -> None:\n \"\"\"\n Read some data from the network, and update the H2 state.\n \"\"\"\n data = await self.socket.read(self.READ_NUM_BYTES, timeout)\n events = self.state.receive_data(data)\n for event in events:\n event_stream_id = getattr(event, \"stream_id\", 0)\n logger.trace(f\"receive_event stream_id={event_stream_id} event={event!r}\")\n\n if hasattr(event, \"error_code\"):\n raise ProtocolError(event)\n\n if event_stream_id in self.events:\n self.events[event_stream_id].append(event)\n\n data_to_send = self.state.data_to_send()\n await self.socket.write(data_to_send, timeout)\n\n async def send_headers(\n self,\n stream_id: int,\n headers: typing.List[typing.Tuple[bytes, bytes]],\n timeout: Timeout,\n ) -> None:\n self.state.send_headers(stream_id, headers)\n data_to_send = self.state.data_to_send()\n await self.socket.write(data_to_send, timeout)\n\n async def send_data(self, 
stream_id: int, chunk: bytes, timeout: Timeout) -> None:\n self.state.send_data(stream_id, chunk)\n data_to_send = self.state.data_to_send()\n await self.socket.write(data_to_send, timeout)\n\n async def end_stream(self, stream_id: int, timeout: Timeout) -> None:\n self.state.end_stream(stream_id)\n data_to_send = self.state.data_to_send()\n await self.socket.write(data_to_send, timeout)\n\n async def acknowledge_received_data(\n self, stream_id: int, amount: int, timeout: Timeout\n ) -> None:\n self.state.acknowledge_received_data(amount, stream_id)\n data_to_send = self.state.data_to_send()\n await self.socket.write(data_to_send, timeout)\n\n async def close_stream(self, stream_id: int) -> None:\n del self.streams[stream_id]\n del self.events[stream_id]\n\n if not self.streams and self.on_release is not None:\n await self.on_release()\n\n\nclass HTTP2Stream:\n def __init__(self, stream_id: int, connection: HTTP2Connection) -> None:\n self.stream_id = stream_id\n self.connection = connection\n\n async def send(self, request: Request, timeout: Timeout) -> Response:\n # Send the request.\n await self.send_headers(request, timeout)\n await self.send_body(request, timeout)\n\n # Receive the response.\n status_code, headers = await self.receive_response(timeout)\n content = self.body_iter(timeout)\n return Response(\n status_code=status_code,\n http_version=\"HTTP/2\",\n headers=headers,\n content=content,\n on_close=self.close,\n request=request,\n )\n\n async def send_headers(self, request: Request, timeout: Timeout) -> None:\n headers = [\n (b\":method\", request.method.encode(\"ascii\")),\n (b\":authority\", request.url.authority.encode(\"ascii\")),\n (b\":scheme\", request.url.scheme.encode(\"ascii\")),\n (b\":path\", request.url.full_path.encode(\"ascii\")),\n ] + [(k, v) for k, v in request.headers.raw if k != b\"host\"]\n\n logger.trace(\n f\"send_headers \"\n f\"stream_id={self.stream_id} \"\n f\"method={request.method!r} \"\n f\"target={request.url.full_path!r} \"\n f\"headers={headers!r}\"\n )\n await self.connection.send_headers(self.stream_id, headers, timeout)\n\n async def send_body(self, request: Request, timeout: Timeout) -> None:\n logger.trace(f\"send_body stream_id={self.stream_id}\")\n async for data in request.stream():\n while data:\n max_flow = await self.connection.wait_for_outgoing_flow(\n self.stream_id, timeout\n )\n chunk_size = min(len(data), max_flow)\n chunk, data = data[:chunk_size], data[chunk_size:]\n await self.connection.send_data(self.stream_id, chunk, timeout)\n\n await self.connection.end_stream(self.stream_id, timeout)\n\n async def receive_response(\n self, timeout: Timeout\n ) -> typing.Tuple[int, typing.List[typing.Tuple[bytes, bytes]]]:\n \"\"\"\n Read the response status and headers from the network.\n \"\"\"\n while True:\n event = await self.connection.wait_for_event(self.stream_id, timeout)\n if isinstance(event, h2.events.ResponseReceived):\n break\n\n status_code = 200\n headers = []\n for k, v in event.headers:\n if k == b\":status\":\n status_code = int(v.decode(\"ascii\", errors=\"ignore\"))\n elif not k.startswith(b\":\"):\n headers.append((k, v))\n\n return (status_code, headers)\n\n async def body_iter(self, timeout: Timeout) -> typing.AsyncIterator[bytes]:\n while True:\n event = await self.connection.wait_for_event(self.stream_id, timeout)\n if isinstance(event, h2.events.DataReceived):\n amount = event.flow_controlled_length\n await self.connection.acknowledge_received_data(\n self.stream_id, amount, timeout\n )\n yield event.data\n 
elif isinstance(event, (h2.events.StreamEnded, h2.events.StreamReset)):\n break\n\n async def close(self) -> None:\n await self.connection.close_stream(self.stream_id)\n", "path": "httpx/dispatch/http2.py"}]} | 3,731 | 192 |
gh_patches_debug_23492 | rasdani/github-patches | git_diff | getsentry__sentry-45670 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Crons: Increase the quota for the Sentry monitors "check-in" API call
### Problem Statement
Due to multi-tenancy, the quota for the Sentry monitors "check-in" API call was reached:
"You are attempting to use this endpoint too frequently. Limit is 40 requests in 1 seconds"
### Solution Brainstorm
Increase the quota for the Sentry monitors "check-in" API call to at least 60 requests in 1 second.
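A sketch of what such an override could look like on the check-in endpoint, assuming Sentry's rate-limit helpers are used; the 60-per-second figures are placeholders taken from the request above, not a confirmed final value:

```
from sentry.ratelimits.config import RateLimitConfig
from sentry.types.ratelimit import RateLimit, RateLimitCategory

# Hypothetical endpoint-level override: allow 60 requests per second,
# expressed as 60 * 60 requests per 60-second window, for each category.
rate_limits = RateLimitConfig(
    limit_overrides={
        "POST": {
            RateLimitCategory.IP: RateLimit(60 * 60, 60),
            RateLimitCategory.USER: RateLimit(60 * 60, 60),
            RateLimitCategory.ORGANIZATION: RateLimit(60 * 60, 60),
        }
    },
)
```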
</issue>
<code>
[start of src/sentry/monitors/endpoints/monitor_ingest_checkin_index.py]
1 from __future__ import annotations
2
3 from django.db import transaction
4 from drf_spectacular.utils import extend_schema
5 from rest_framework.exceptions import Throttled
6 from rest_framework.request import Request
7 from rest_framework.response import Response
8
9 from sentry import ratelimits
10 from sentry.api.base import region_silo_endpoint
11 from sentry.api.serializers import serialize
12 from sentry.apidocs.constants import (
13 RESPONSE_BAD_REQUEST,
14 RESPONSE_FORBIDDEN,
15 RESPONSE_NOTFOUND,
16 RESPONSE_UNAUTHORIZED,
17 )
18 from sentry.apidocs.parameters import GLOBAL_PARAMS, MONITOR_PARAMS
19 from sentry.apidocs.utils import inline_sentry_response_serializer
20 from sentry.models import Environment, Project, ProjectKey
21 from sentry.monitors.models import (
22 CheckInStatus,
23 Monitor,
24 MonitorCheckIn,
25 MonitorEnvironment,
26 MonitorStatus,
27 )
28 from sentry.monitors.serializers import MonitorCheckInSerializerResponse
29 from sentry.monitors.validators import MonitorCheckInValidator
30 from sentry.signals import first_cron_checkin_received, first_cron_monitor_created
31 from sentry.utils import metrics
32
33 from .base import MonitorIngestEndpoint
34
35 CHECKIN_QUOTA_LIMIT = 5
36 CHECKIN_QUOTA_WINDOW = 60
37
38
39 @region_silo_endpoint
40 @extend_schema(tags=["Crons"])
41 class MonitorIngestCheckInIndexEndpoint(MonitorIngestEndpoint):
42 public = {"POST"}
43
44 @extend_schema(
45 operation_id="Create a new check-in",
46 parameters=[
47 GLOBAL_PARAMS.ORG_SLUG,
48 MONITOR_PARAMS.MONITOR_ID,
49 ],
50 request=MonitorCheckInValidator,
51 responses={
52 200: inline_sentry_response_serializer(
53 "MonitorCheckIn", MonitorCheckInSerializerResponse
54 ),
55 201: inline_sentry_response_serializer(
56 "MonitorCheckIn", MonitorCheckInSerializerResponse
57 ),
58 400: RESPONSE_BAD_REQUEST,
59 401: RESPONSE_UNAUTHORIZED,
60 403: RESPONSE_FORBIDDEN,
61 404: RESPONSE_NOTFOUND,
62 },
63 )
64 def post(
65 self, request: Request, project, monitor, organization_slug: str | None = None
66 ) -> Response:
67 """
68 Creates a new check-in for a monitor.
69
70 If `status` is not present, it will be assumed that the check-in is starting, and be marked as `in_progress`.
71
72 To achieve a ping-like behavior, you can simply define `status` and optionally `duration` and
73 this check-in will be automatically marked as finished.
74
75 Note: If a DSN is utilized for authentication, the response will be limited in details.
76 """
77 if monitor.status in [MonitorStatus.PENDING_DELETION, MonitorStatus.DELETION_IN_PROGRESS]:
78 return self.respond(status=404)
79
80 serializer = MonitorCheckInValidator(
81 data=request.data, context={"project": project, "request": request}
82 )
83 if not serializer.is_valid():
84 return self.respond(serializer.errors, status=400)
85
86 if ratelimits.is_limited(
87 f"monitor-checkins:{monitor.id}",
88 limit=CHECKIN_QUOTA_LIMIT,
89 window=CHECKIN_QUOTA_WINDOW,
90 ):
91 metrics.incr("monitors.checkin.dropped.ratelimited")
92 raise Throttled(
93 detail="Rate limited, please send no more than 5 checkins per minute per monitor"
94 )
95
96 result = serializer.validated_data
97
98 with transaction.atomic():
99 environment_name = result.get("environment")
100 if not environment_name:
101 environment_name = "production"
102
103 environment = Environment.get_or_create(project=project, name=environment_name)
104
105 monitorenvironment_defaults = {
106 "status": monitor.status,
107 "next_checkin": monitor.next_checkin,
108 "last_checkin": monitor.last_checkin,
109 }
110 monitor_environment = MonitorEnvironment.objects.get_or_create(
111 monitor=monitor, environment=environment, defaults=monitorenvironment_defaults
112 )[0]
113
114 checkin = MonitorCheckIn.objects.create(
115 project_id=project.id,
116 monitor_id=monitor.id,
117 monitor_environment=monitor_environment,
118 duration=result.get("duration"),
119 status=getattr(CheckInStatus, result["status"].upper()),
120 )
121
122 if not project.flags.has_cron_checkins:
123 # Backfill users that already have cron monitors
124 if not project.flags.has_cron_monitors:
125 first_cron_monitor_created.send_robust(
126 project=project, user=None, sender=Project
127 )
128 first_cron_checkin_received.send_robust(
129 project=project, monitor_id=str(monitor.guid), sender=Project
130 )
131
132 if checkin.status == CheckInStatus.ERROR and monitor.status != MonitorStatus.DISABLED:
133 monitor_failed = monitor.mark_failed(last_checkin=checkin.date_added)
134 monitor_environment.mark_failed(last_checkin=checkin.date_added)
135 if not monitor_failed:
136 if isinstance(request.auth, ProjectKey):
137 return self.respond(status=200)
138 return self.respond(serialize(checkin, request.user), status=200)
139 else:
140 monitor_params = {
141 "last_checkin": checkin.date_added,
142 "next_checkin": monitor.get_next_scheduled_checkin(checkin.date_added),
143 }
144 if checkin.status == CheckInStatus.OK and monitor.status != MonitorStatus.DISABLED:
145 monitor_params["status"] = MonitorStatus.OK
146 Monitor.objects.filter(id=monitor.id).exclude(
147 last_checkin__gt=checkin.date_added
148 ).update(**monitor_params)
149 MonitorEnvironment.objects.filter(id=monitor_environment.id).exclude(
150 last_checkin__gt=checkin.date_added
151 ).update(**monitor_params)
152
153 if isinstance(request.auth, ProjectKey):
154 return self.respond({"id": str(checkin.guid)}, status=201)
155
156 response = self.respond(serialize(checkin, request.user), status=201)
157 # TODO(dcramer): this should return a single aboslute uri, aka ALWAYS including org domains if enabled
158 # TODO(dcramer): both of these are patterns that we should make easier to accomplish in other endpoints
159 response["Link"] = self.build_link_header(request, "checkins/latest/", rel="latest")
160 response["Location"] = request.build_absolute_uri(f"checkins/{checkin.guid}/")
161 return response
162
[end of src/sentry/monitors/endpoints/monitor_ingest_checkin_index.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/sentry/monitors/endpoints/monitor_ingest_checkin_index.py b/src/sentry/monitors/endpoints/monitor_ingest_checkin_index.py
--- a/src/sentry/monitors/endpoints/monitor_ingest_checkin_index.py
+++ b/src/sentry/monitors/endpoints/monitor_ingest_checkin_index.py
@@ -27,7 +27,9 @@
)
from sentry.monitors.serializers import MonitorCheckInSerializerResponse
from sentry.monitors.validators import MonitorCheckInValidator
+from sentry.ratelimits.config import RateLimitConfig
from sentry.signals import first_cron_checkin_received, first_cron_monitor_created
+from sentry.types.ratelimit import RateLimit, RateLimitCategory
from sentry.utils import metrics
from .base import MonitorIngestEndpoint
@@ -41,6 +43,16 @@
class MonitorIngestCheckInIndexEndpoint(MonitorIngestEndpoint):
public = {"POST"}
+ rate_limits = RateLimitConfig(
+ limit_overrides={
+ "POST": {
+ RateLimitCategory.IP: RateLimit(40 * 60, 60),
+ RateLimitCategory.USER: RateLimit(40 * 60, 60),
+ RateLimitCategory.ORGANIZATION: RateLimit(40 * 60, 60),
+ }
+ },
+ )
+
@extend_schema(
operation_id="Create a new check-in",
parameters=[
| {"golden_diff": "diff --git a/src/sentry/monitors/endpoints/monitor_ingest_checkin_index.py b/src/sentry/monitors/endpoints/monitor_ingest_checkin_index.py\n--- a/src/sentry/monitors/endpoints/monitor_ingest_checkin_index.py\n+++ b/src/sentry/monitors/endpoints/monitor_ingest_checkin_index.py\n@@ -27,7 +27,9 @@\n )\n from sentry.monitors.serializers import MonitorCheckInSerializerResponse\n from sentry.monitors.validators import MonitorCheckInValidator\n+from sentry.ratelimits.config import RateLimitConfig\n from sentry.signals import first_cron_checkin_received, first_cron_monitor_created\n+from sentry.types.ratelimit import RateLimit, RateLimitCategory\n from sentry.utils import metrics\n \n from .base import MonitorIngestEndpoint\n@@ -41,6 +43,16 @@\n class MonitorIngestCheckInIndexEndpoint(MonitorIngestEndpoint):\n public = {\"POST\"}\n \n+ rate_limits = RateLimitConfig(\n+ limit_overrides={\n+ \"POST\": {\n+ RateLimitCategory.IP: RateLimit(40 * 60, 60),\n+ RateLimitCategory.USER: RateLimit(40 * 60, 60),\n+ RateLimitCategory.ORGANIZATION: RateLimit(40 * 60, 60),\n+ }\n+ },\n+ )\n+\n @extend_schema(\n operation_id=\"Create a new check-in\",\n parameters=[\n", "issue": "Crons: Increase quota call API Sentry monitors \"check-in\"\n### Problem Statement\n\nDue multi-tenancy was reached quota call API Sentry monitors \"check-in\":\r\n\"You are attempting to use this endpoint too frequently. Limit is 40 requests in 1 seconds\"\n\n### Solution Brainstorm\n\nIncrease quota call API Sentry monitors \"check-in\" at least to 60 requests in 1 seconds\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom django.db import transaction\nfrom drf_spectacular.utils import extend_schema\nfrom rest_framework.exceptions import Throttled\nfrom rest_framework.request import Request\nfrom rest_framework.response import Response\n\nfrom sentry import ratelimits\nfrom sentry.api.base import region_silo_endpoint\nfrom sentry.api.serializers import serialize\nfrom sentry.apidocs.constants import (\n RESPONSE_BAD_REQUEST,\n RESPONSE_FORBIDDEN,\n RESPONSE_NOTFOUND,\n RESPONSE_UNAUTHORIZED,\n)\nfrom sentry.apidocs.parameters import GLOBAL_PARAMS, MONITOR_PARAMS\nfrom sentry.apidocs.utils import inline_sentry_response_serializer\nfrom sentry.models import Environment, Project, ProjectKey\nfrom sentry.monitors.models import (\n CheckInStatus,\n Monitor,\n MonitorCheckIn,\n MonitorEnvironment,\n MonitorStatus,\n)\nfrom sentry.monitors.serializers import MonitorCheckInSerializerResponse\nfrom sentry.monitors.validators import MonitorCheckInValidator\nfrom sentry.signals import first_cron_checkin_received, first_cron_monitor_created\nfrom sentry.utils import metrics\n\nfrom .base import MonitorIngestEndpoint\n\nCHECKIN_QUOTA_LIMIT = 5\nCHECKIN_QUOTA_WINDOW = 60\n\n\n@region_silo_endpoint\n@extend_schema(tags=[\"Crons\"])\nclass MonitorIngestCheckInIndexEndpoint(MonitorIngestEndpoint):\n public = {\"POST\"}\n\n @extend_schema(\n operation_id=\"Create a new check-in\",\n parameters=[\n GLOBAL_PARAMS.ORG_SLUG,\n MONITOR_PARAMS.MONITOR_ID,\n ],\n request=MonitorCheckInValidator,\n responses={\n 200: inline_sentry_response_serializer(\n \"MonitorCheckIn\", MonitorCheckInSerializerResponse\n ),\n 201: inline_sentry_response_serializer(\n \"MonitorCheckIn\", MonitorCheckInSerializerResponse\n ),\n 400: RESPONSE_BAD_REQUEST,\n 401: RESPONSE_UNAUTHORIZED,\n 403: RESPONSE_FORBIDDEN,\n 404: RESPONSE_NOTFOUND,\n },\n )\n def post(\n self, request: Request, project, monitor, organization_slug: str | None = 
None\n ) -> Response:\n \"\"\"\n Creates a new check-in for a monitor.\n\n If `status` is not present, it will be assumed that the check-in is starting, and be marked as `in_progress`.\n\n To achieve a ping-like behavior, you can simply define `status` and optionally `duration` and\n this check-in will be automatically marked as finished.\n\n Note: If a DSN is utilized for authentication, the response will be limited in details.\n \"\"\"\n if monitor.status in [MonitorStatus.PENDING_DELETION, MonitorStatus.DELETION_IN_PROGRESS]:\n return self.respond(status=404)\n\n serializer = MonitorCheckInValidator(\n data=request.data, context={\"project\": project, \"request\": request}\n )\n if not serializer.is_valid():\n return self.respond(serializer.errors, status=400)\n\n if ratelimits.is_limited(\n f\"monitor-checkins:{monitor.id}\",\n limit=CHECKIN_QUOTA_LIMIT,\n window=CHECKIN_QUOTA_WINDOW,\n ):\n metrics.incr(\"monitors.checkin.dropped.ratelimited\")\n raise Throttled(\n detail=\"Rate limited, please send no more than 5 checkins per minute per monitor\"\n )\n\n result = serializer.validated_data\n\n with transaction.atomic():\n environment_name = result.get(\"environment\")\n if not environment_name:\n environment_name = \"production\"\n\n environment = Environment.get_or_create(project=project, name=environment_name)\n\n monitorenvironment_defaults = {\n \"status\": monitor.status,\n \"next_checkin\": monitor.next_checkin,\n \"last_checkin\": monitor.last_checkin,\n }\n monitor_environment = MonitorEnvironment.objects.get_or_create(\n monitor=monitor, environment=environment, defaults=monitorenvironment_defaults\n )[0]\n\n checkin = MonitorCheckIn.objects.create(\n project_id=project.id,\n monitor_id=monitor.id,\n monitor_environment=monitor_environment,\n duration=result.get(\"duration\"),\n status=getattr(CheckInStatus, result[\"status\"].upper()),\n )\n\n if not project.flags.has_cron_checkins:\n # Backfill users that already have cron monitors\n if not project.flags.has_cron_monitors:\n first_cron_monitor_created.send_robust(\n project=project, user=None, sender=Project\n )\n first_cron_checkin_received.send_robust(\n project=project, monitor_id=str(monitor.guid), sender=Project\n )\n\n if checkin.status == CheckInStatus.ERROR and monitor.status != MonitorStatus.DISABLED:\n monitor_failed = monitor.mark_failed(last_checkin=checkin.date_added)\n monitor_environment.mark_failed(last_checkin=checkin.date_added)\n if not monitor_failed:\n if isinstance(request.auth, ProjectKey):\n return self.respond(status=200)\n return self.respond(serialize(checkin, request.user), status=200)\n else:\n monitor_params = {\n \"last_checkin\": checkin.date_added,\n \"next_checkin\": monitor.get_next_scheduled_checkin(checkin.date_added),\n }\n if checkin.status == CheckInStatus.OK and monitor.status != MonitorStatus.DISABLED:\n monitor_params[\"status\"] = MonitorStatus.OK\n Monitor.objects.filter(id=monitor.id).exclude(\n last_checkin__gt=checkin.date_added\n ).update(**monitor_params)\n MonitorEnvironment.objects.filter(id=monitor_environment.id).exclude(\n last_checkin__gt=checkin.date_added\n ).update(**monitor_params)\n\n if isinstance(request.auth, ProjectKey):\n return self.respond({\"id\": str(checkin.guid)}, status=201)\n\n response = self.respond(serialize(checkin, request.user), status=201)\n # TODO(dcramer): this should return a single aboslute uri, aka ALWAYS including org domains if enabled\n # TODO(dcramer): both of these are patterns that we should make easier to accomplish in other 
endpoints\n response[\"Link\"] = self.build_link_header(request, \"checkins/latest/\", rel=\"latest\")\n response[\"Location\"] = request.build_absolute_uri(f\"checkins/{checkin.guid}/\")\n return response\n", "path": "src/sentry/monitors/endpoints/monitor_ingest_checkin_index.py"}]} | 2,383 | 327 |
gh_patches_debug_22228 | rasdani/github-patches | git_diff | iterative__dvc-2337 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Two test_info_* tests are failing on tip of master
At the [tip of master right now](https://github.com/iterative/dvc/commit/32425e90691bfd4988eb0a2d70cdc4fdba910f49), the two tests `test_info_outside_of_repo` and `test_info_in_repo` are failing for me (but not in CI, so I'm guessing this is environment-specific), because the `dvc version` output does not contain the "Filesystem type" lines that the tests are expecting.
I'm on macOS 10.14.5 and running Python 3.7.3 installed with pyenv, in a fresh virtualenv configured as specified in https://dvc.org/doc/user-guide/contributing
Cut-n-pasted output from `python -m tests`:
```
______________________________________________________________________ test_info_outside_of_repo ______________________________________________________________________
[gw2] darwin -- Python 3.7.3 /Users/gbr/.pyenv/versions/3.7.3/envs/DVC/bin/python
repo_dir = <tests.basic_env.TestDirFixture object at 0x129f640b8>, caplog = <_pytest.logging.LogCaptureFixture object at 0x129f640f0>
def test_info_outside_of_repo(repo_dir, caplog):
assert main(["version"]) == 0
assert re.search(re.compile(r"DVC version: \d+\.\d+\.\d+"), caplog.text)
assert re.search(re.compile(r"Python version: \d\.\d\.\d"), caplog.text)
assert re.search(re.compile(r"Platform: .*"), caplog.text)
assert re.search(re.compile(r"Binary: (True|False)"), caplog.text)
> assert re.search(
re.compile(r"Filesystem type \(workspace\): .*"), caplog.text
)
E AssertionError: assert None
E + where None = <function search at 0x109ed6ea0>(re.compile('Filesystem type \\(workspace\\): .*'), 'INFO dvc.command.version:version.py:65 DVC version: 0.53.2\n Python ver... Platform: Darwin-18.6.0-x86_64-i386-64bit\n Binary: False\n')
E + where <function search at 0x109ed6ea0> = re.search
E + and re.compile('Filesystem type \\(workspace\\): .*') = <function compile at 0x109ed7268>('Filesystem type \\(workspace\\): .*')
E + where <function compile at 0x109ed7268> = re.compile
E + and 'INFO dvc.command.version:version.py:65 DVC version: 0.53.2\n Python ver... Platform: Darwin-18.6.0-x86_64-i386-64bit\n Binary: False\n' = <_pytest.logging.LogCaptureFixture object at 0x129f640f0>.text
/Users/gbr/git/dvc/tests/func/test_version.py:31: AssertionError
------------------------------------------------------------------------ Captured stdout call -------------------------------------------------------------------------
-------------------------------------------------------------------------- Captured log call --------------------------------------------------------------------------
INFO dvc.command.version:version.py:65 DVC version: 0.53.2
Python version: 3.7.3
Platform: Darwin-18.6.0-x86_64-i386-64bit
Binary: False
---------------------------------------------------------------------- Captured stdout teardown -----------------------------------------------------------------------
------------------------------------------------------------------------ Captured log teardown ------------------------------------------------------------------------
DEBUG dvc.utils:__init__.py:177 Removing '../../../../private/var/folders/_g/f_zc9wdj6vqcbsk1h62bwxmd9xsbxj/T/dvc-test.40553.vmgjwy8n.3wkYFuPE6JggPk5TfN6ZXU'
__________________________________________________________________________ test_info_in_repo __________________________________________________________________________
[gw1] darwin -- Python 3.7.3 /Users/gbr/.pyenv/versions/3.7.3/envs/DVC/bin/python
dvc_repo = Repo: '/private/var/folders/_g/f_zc9wdj6vqcbsk1h62bwxmd9xsbxj/T/dvc-test.40552.f97sqyup.brvX7Jws37VU3abAn5sjD9'
caplog = <_pytest.logging.LogCaptureFixture object at 0x1236ef5f8>
def test_info_in_repo(dvc_repo, caplog):
assert main(["version"]) == 0
assert re.search(re.compile(r"DVC version: \d+\.\d+\.\d+"), caplog.text)
assert re.search(re.compile(r"Python version: \d\.\d\.\d"), caplog.text)
assert re.search(re.compile(r"Platform: .*"), caplog.text)
assert re.search(re.compile(r"Binary: (True|False)"), caplog.text)
> assert re.search(
re.compile(r"Filesystem type \(cache directory\): .*"), caplog.text
)
E AssertionError: assert None
E + where None = <function search at 0x104e41ea0>(re.compile('Filesystem type \\(cache directory\\): .*'), 'INFO dvc.command.version:version.py:65 DVC version: 0.53.2\n Python ver... Platform: Darwin-18.6.0-x86_64-i386-64bit\n Binary: False\n')
E + where <function search at 0x104e41ea0> = re.search
E + and re.compile('Filesystem type \\(cache directory\\): .*') = <function compile at 0x104e42268>('Filesystem type \\(cache directory\\): .*')
E + where <function compile at 0x104e42268> = re.compile
E + and 'INFO dvc.command.version:version.py:65 DVC version: 0.53.2\n Python ver... Platform: Darwin-18.6.0-x86_64-i386-64bit\n Binary: False\n' = <_pytest.logging.LogCaptureFixture object at 0x1236ef5f8>.text
/Users/gbr/git/dvc/tests/func/test_version.py:13: AssertionError
------------------------------------------------------------------------ Captured stdout setup ------------------------------------------------------------------------
What's next?
------------
- Check out the documentation: https://dvc.org/doc
- Get help and share ideas: https://dvc.org/chat
- Star us on GitHub: https://github.com/iterative/dvc
------------------------------------------------------------------------- Captured log setup --------------------------------------------------------------------------
INFO dvc.repo.init:init.py:40 What's next?
------------
- Check out the documentation: https://dvc.org/doc
- Get help and share ideas: https://dvc.org/chat
- Star us on GitHub: https://github.com/iterative/dvc
------------------------------------------------------------------------ Captured stdout call -------------------------------------------------------------------------
DVC version: 0.53.2
Python version: 3.7.3
Platform: Darwin-18.6.0-x86_64-i386-64bit
Binary: False
-------------------------------------------------------------------------- Captured log call --------------------------------------------------------------------------
INFO dvc.command.version:version.py:65 DVC version: 0.53.2
Python version: 3.7.3
Platform: Darwin-18.6.0-x86_64-i386-64bit
Binary: False
---------------------------------------------------------------------- Captured stdout teardown -----------------------------------------------------------------------
DEBUG: Removing '../../../../private/var/folders/_g/f_zc9wdj6vqcbsk1h62bwxmd9xsbxj/T/dvc-test.40552.f97sqyup.brvX7Jws37VU3abAn5sjD9'
------------------------------------------------------------------------ Captured log teardown ------------------------------------------------------------------------
DEBUG dvc.utils:__init__.py:177 Removing '../../../../private/var/folders/_g/f_zc9wdj6vqcbsk1h62bwxmd9xsbxj/T/dvc-test.40552.f97sqyup.brvX7Jws37VU3abAn5sjD9'
[...]
==== 2 failed, 619 passed, 41 skipped, 10 warnings in 73.48 seconds ====
```
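For context, the missing "Filesystem type" lines are only produced when `psutil` imports successfully (see the `if psutil:` guard in `dvc/command/version.py` below), so a quick sanity check in the same virtualenv is:

```
# Check whether psutil is importable in the virtualenv running the tests;
# without it, `dvc version` currently skips the filesystem type output.
try:
    import psutil
    print("psutil", psutil.__version__, "is installed")
except ImportError:
    print("psutil is missing, so the 'Filesystem type' lines are skipped")
```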
</issue>
<code>
[start of dvc/command/version.py]
1 from __future__ import unicode_literals
2
3 import os
4 import platform
5 import argparse
6 import logging
7 import uuid
8
9 try:
10 import psutil
11 except ImportError:
12 psutil = None
13
14 from dvc.utils import is_binary
15 from dvc.utils.compat import pathlib
16 from dvc.repo import Repo
17 from dvc.command.base import CmdBaseNoRepo, append_doc_link
18 from dvc.version import __version__
19 from dvc.exceptions import DvcException, NotDvcRepoError
20 from dvc.system import System
21
22
23 logger = logging.getLogger(__name__)
24
25
26 class CmdVersion(CmdBaseNoRepo):
27 def run(self):
28 dvc_version = __version__
29 python_version = platform.python_version()
30 platform_type = platform.platform()
31 binary = is_binary()
32
33 info = (
34 "DVC version: {dvc_version}\n"
35 "Python version: {python_version}\n"
36 "Platform: {platform_type}\n"
37 "Binary: {binary}\n"
38 ).format(
39 dvc_version=dvc_version,
40 python_version=python_version,
41 platform_type=platform_type,
42 binary=binary,
43 )
44
45 if psutil:
46 try:
47 repo = Repo()
48 root_directory = repo.root_dir
49
50 info += (
51 "Cache: {cache}\n"
52 "Filesystem type (cache directory): {fs_cache}\n"
53 ).format(
54 cache=self.get_linktype_support_info(repo),
55 fs_cache=self.get_fs_type(repo.cache.local.cache_dir),
56 )
57
58 except NotDvcRepoError:
59 root_directory = os.getcwd()
60
61 info += ("Filesystem type (workspace): {fs_root}").format(
62 fs_root=self.get_fs_type(os.path.abspath(root_directory))
63 )
64
65 logger.info(info)
66 return 0
67
68 @staticmethod
69 def get_fs_type(path):
70 partition = {
71 pathlib.Path(part.mountpoint): (part.fstype, part.device)
72 for part in psutil.disk_partitions()
73 }
74 for parent in pathlib.Path(path).parents:
75 if parent in partition:
76 return partition[parent]
77 return ("unkown", "none")
78
79 @staticmethod
80 def get_linktype_support_info(repo):
81 links = {
82 "reflink": System.reflink,
83 "hardlink": System.hardlink,
84 "symlink": System.symlink,
85 }
86
87 fname = "." + str(uuid.uuid4())
88 src = os.path.join(repo.cache.local.cache_dir, fname)
89 open(src, "w").close()
90 dst = os.path.join(repo.root_dir, fname)
91
92 cache = []
93
94 for name, link in links.items():
95 try:
96 link(src, dst)
97 os.unlink(dst)
98 supported = True
99 except DvcException:
100 supported = False
101 cache.append(
102 "{name} - {supported}".format(
103 name=name, supported=True if supported else False
104 )
105 )
106 os.remove(src)
107
108 return ", ".join(cache)
109
110
111 def add_parser(subparsers, parent_parser):
112 VERSION_HELP = "Show DVC version and system/environment informaion."
113
114 version_parser = subparsers.add_parser(
115 "version",
116 parents=[parent_parser],
117 description=append_doc_link(VERSION_HELP, "version"),
118 help=VERSION_HELP,
119 formatter_class=argparse.RawDescriptionHelpFormatter,
120 )
121 version_parser.set_defaults(func=CmdVersion)
122
[end of dvc/command/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dvc/command/version.py b/dvc/command/version.py
--- a/dvc/command/version.py
+++ b/dvc/command/version.py
@@ -42,22 +42,22 @@
binary=binary,
)
- if psutil:
- try:
- repo = Repo()
- root_directory = repo.root_dir
+ try:
+ repo = Repo()
+ root_directory = repo.root_dir
+
+ info += "Cache: {cache}\n".format(
+ cache=self.get_linktype_support_info(repo)
+ )
+ if psutil:
info += (
- "Cache: {cache}\n"
"Filesystem type (cache directory): {fs_cache}\n"
- ).format(
- cache=self.get_linktype_support_info(repo),
- fs_cache=self.get_fs_type(repo.cache.local.cache_dir),
- )
-
- except NotDvcRepoError:
- root_directory = os.getcwd()
+ ).format(fs_cache=self.get_fs_type(repo.cache.local.cache_dir))
+ except NotDvcRepoError:
+ root_directory = os.getcwd()
+ if psutil:
info += ("Filesystem type (workspace): {fs_root}").format(
fs_root=self.get_fs_type(os.path.abspath(root_directory))
)
| {"golden_diff": "diff --git a/dvc/command/version.py b/dvc/command/version.py\n--- a/dvc/command/version.py\n+++ b/dvc/command/version.py\n@@ -42,22 +42,22 @@\n binary=binary,\n )\n \n- if psutil:\n- try:\n- repo = Repo()\n- root_directory = repo.root_dir\n+ try:\n+ repo = Repo()\n+ root_directory = repo.root_dir\n+\n+ info += \"Cache: {cache}\\n\".format(\n+ cache=self.get_linktype_support_info(repo)\n+ )\n \n+ if psutil:\n info += (\n- \"Cache: {cache}\\n\"\n \"Filesystem type (cache directory): {fs_cache}\\n\"\n- ).format(\n- cache=self.get_linktype_support_info(repo),\n- fs_cache=self.get_fs_type(repo.cache.local.cache_dir),\n- )\n-\n- except NotDvcRepoError:\n- root_directory = os.getcwd()\n+ ).format(fs_cache=self.get_fs_type(repo.cache.local.cache_dir))\n+ except NotDvcRepoError:\n+ root_directory = os.getcwd()\n \n+ if psutil:\n info += (\"Filesystem type (workspace): {fs_root}\").format(\n fs_root=self.get_fs_type(os.path.abspath(root_directory))\n )\n", "issue": "Two test_info_* tests are failing on tip of master\nAt the [tip of master right now](https://github.com/iterative/dvc/commit/32425e90691bfd4988eb0a2d70cdc4fdba910f49), the two tests `test_info_outside_of_repo` and `test_info_in_repo` are failing for me (but not in CI, so I'm guessing this is environment-specific), because the `dvc version` output does not contain the \"Filesystem type\" lines that the tests are expecting.\r\n\r\nI'm on macOS 10.14.5 and running Python 3.7.3 installed with pyenv, in a fresh virtualenv configured as specified in https://dvc.org/doc/user-guide/contributing\r\n\r\nCut-n-pasted output from `python -m tests`:\r\n\r\n```\r\n______________________________________________________________________ test_info_outside_of_repo ______________________________________________________________________\r\n[gw2] darwin -- Python 3.7.3 /Users/gbr/.pyenv/versions/3.7.3/envs/DVC/bin/python\r\n\r\nrepo_dir = <tests.basic_env.TestDirFixture object at 0x129f640b8>, caplog = <_pytest.logging.LogCaptureFixture object at 0x129f640f0>\r\n\r\n def test_info_outside_of_repo(repo_dir, caplog):\r\n assert main([\"version\"]) == 0\r\n\r\n assert re.search(re.compile(r\"DVC version: \\d+\\.\\d+\\.\\d+\"), caplog.text)\r\n assert re.search(re.compile(r\"Python version: \\d\\.\\d\\.\\d\"), caplog.text)\r\n assert re.search(re.compile(r\"Platform: .*\"), caplog.text)\r\n assert re.search(re.compile(r\"Binary: (True|False)\"), caplog.text)\r\n> assert re.search(\r\n re.compile(r\"Filesystem type \\(workspace\\): .*\"), caplog.text\r\n )\r\nE AssertionError: assert None\r\nE + where None = <function search at 0x109ed6ea0>(re.compile('Filesystem type \\\\(workspace\\\\): .*'), 'INFO dvc.command.version:version.py:65 DVC version: 0.53.2\\n Python ver... Platform: Darwin-18.6.0-x86_64-i386-64bit\\n Binary: False\\n')\r\nE + where <function search at 0x109ed6ea0> = re.search\r\nE + and re.compile('Filesystem type \\\\(workspace\\\\): .*') = <function compile at 0x109ed7268>('Filesystem type \\\\(workspace\\\\): .*')\r\nE + where <function compile at 0x109ed7268> = re.compile\r\nE + and 'INFO dvc.command.version:version.py:65 DVC version: 0.53.2\\n Python ver... 
Platform: Darwin-18.6.0-x86_64-i386-64bit\\n Binary: False\\n' = <_pytest.logging.LogCaptureFixture object at 0x129f640f0>.text\r\n\r\n/Users/gbr/git/dvc/tests/func/test_version.py:31: AssertionError\r\n------------------------------------------------------------------------ Captured stdout call -------------------------------------------------------------------------\r\n\r\n\r\n-------------------------------------------------------------------------- Captured log call --------------------------------------------------------------------------\r\nINFO dvc.command.version:version.py:65 DVC version: 0.53.2\r\n Python version: 3.7.3\r\n Platform: Darwin-18.6.0-x86_64-i386-64bit\r\n Binary: False\r\n---------------------------------------------------------------------- Captured stdout teardown -----------------------------------------------------------------------\r\n\r\n\r\n------------------------------------------------------------------------ Captured log teardown ------------------------------------------------------------------------\r\nDEBUG dvc.utils:__init__.py:177 Removing '../../../../private/var/folders/_g/f_zc9wdj6vqcbsk1h62bwxmd9xsbxj/T/dvc-test.40553.vmgjwy8n.3wkYFuPE6JggPk5TfN6ZXU'\r\n__________________________________________________________________________ test_info_in_repo __________________________________________________________________________\r\n[gw1] darwin -- Python 3.7.3 /Users/gbr/.pyenv/versions/3.7.3/envs/DVC/bin/python\r\n\r\ndvc_repo = Repo: '/private/var/folders/_g/f_zc9wdj6vqcbsk1h62bwxmd9xsbxj/T/dvc-test.40552.f97sqyup.brvX7Jws37VU3abAn5sjD9'\r\ncaplog = <_pytest.logging.LogCaptureFixture object at 0x1236ef5f8>\r\n\r\n def test_info_in_repo(dvc_repo, caplog):\r\n assert main([\"version\"]) == 0\r\n\r\n assert re.search(re.compile(r\"DVC version: \\d+\\.\\d+\\.\\d+\"), caplog.text)\r\n assert re.search(re.compile(r\"Python version: \\d\\.\\d\\.\\d\"), caplog.text)\r\n assert re.search(re.compile(r\"Platform: .*\"), caplog.text)\r\n assert re.search(re.compile(r\"Binary: (True|False)\"), caplog.text)\r\n> assert re.search(\r\n re.compile(r\"Filesystem type \\(cache directory\\): .*\"), caplog.text\r\n )\r\nE AssertionError: assert None\r\nE + where None = <function search at 0x104e41ea0>(re.compile('Filesystem type \\\\(cache directory\\\\): .*'), 'INFO dvc.command.version:version.py:65 DVC version: 0.53.2\\n Python ver... Platform: Darwin-18.6.0-x86_64-i386-64bit\\n Binary: False\\n')\r\nE + where <function search at 0x104e41ea0> = re.search\r\nE + and re.compile('Filesystem type \\\\(cache directory\\\\): .*') = <function compile at 0x104e42268>('Filesystem type \\\\(cache directory\\\\): .*')\r\nE + where <function compile at 0x104e42268> = re.compile\r\nE + and 'INFO dvc.command.version:version.py:65 DVC version: 0.53.2\\n Python ver... 
Platform: Darwin-18.6.0-x86_64-i386-64bit\\n Binary: False\\n' = <_pytest.logging.LogCaptureFixture object at 0x1236ef5f8>.text\r\n\r\n/Users/gbr/git/dvc/tests/func/test_version.py:13: AssertionError\r\n------------------------------------------------------------------------ Captured stdout setup ------------------------------------------------------------------------\r\n\r\nWhat's next?\r\n------------\r\n- Check out the documentation: https://dvc.org/doc\r\n- Get help and share ideas: https://dvc.org/chat\r\n- Star us on GitHub: https://github.com/iterative/dvc\r\n------------------------------------------------------------------------- Captured log setup --------------------------------------------------------------------------\r\nINFO dvc.repo.init:init.py:40 What's next?\r\n------------\r\n- Check out the documentation: https://dvc.org/doc\r\n- Get help and share ideas: https://dvc.org/chat\r\n- Star us on GitHub: https://github.com/iterative/dvc\r\n------------------------------------------------------------------------ Captured stdout call -------------------------------------------------------------------------\r\n\r\nDVC version: 0.53.2\r\nPython version: 3.7.3\r\nPlatform: Darwin-18.6.0-x86_64-i386-64bit\r\nBinary: False\r\n\r\n-------------------------------------------------------------------------- Captured log call --------------------------------------------------------------------------\r\nINFO dvc.command.version:version.py:65 DVC version: 0.53.2\r\n Python version: 3.7.3\r\n Platform: Darwin-18.6.0-x86_64-i386-64bit\r\n Binary: False\r\n---------------------------------------------------------------------- Captured stdout teardown -----------------------------------------------------------------------\r\n\r\nDEBUG: Removing '../../../../private/var/folders/_g/f_zc9wdj6vqcbsk1h62bwxmd9xsbxj/T/dvc-test.40552.f97sqyup.brvX7Jws37VU3abAn5sjD9'\r\n------------------------------------------------------------------------ Captured log teardown ------------------------------------------------------------------------\r\nDEBUG dvc.utils:__init__.py:177 Removing '../../../../private/var/folders/_g/f_zc9wdj6vqcbsk1h62bwxmd9xsbxj/T/dvc-test.40552.f97sqyup.brvX7Jws37VU3abAn5sjD9'\r\n\r\n[...]\r\n\r\n==== 2 failed, 619 passed, 41 skipped, 10 warnings in 73.48 seconds ====\r\n```\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport os\nimport platform\nimport argparse\nimport logging\nimport uuid\n\ntry:\n import psutil\nexcept ImportError:\n psutil = None\n\nfrom dvc.utils import is_binary\nfrom dvc.utils.compat import pathlib\nfrom dvc.repo import Repo\nfrom dvc.command.base import CmdBaseNoRepo, append_doc_link\nfrom dvc.version import __version__\nfrom dvc.exceptions import DvcException, NotDvcRepoError\nfrom dvc.system import System\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass CmdVersion(CmdBaseNoRepo):\n def run(self):\n dvc_version = __version__\n python_version = platform.python_version()\n platform_type = platform.platform()\n binary = is_binary()\n\n info = (\n \"DVC version: {dvc_version}\\n\"\n \"Python version: {python_version}\\n\"\n \"Platform: {platform_type}\\n\"\n \"Binary: {binary}\\n\"\n ).format(\n dvc_version=dvc_version,\n python_version=python_version,\n platform_type=platform_type,\n binary=binary,\n )\n\n if psutil:\n try:\n repo = Repo()\n root_directory = repo.root_dir\n\n info += (\n \"Cache: {cache}\\n\"\n \"Filesystem type (cache directory): {fs_cache}\\n\"\n ).format(\n cache=self.get_linktype_support_info(repo),\n 
fs_cache=self.get_fs_type(repo.cache.local.cache_dir),\n )\n\n except NotDvcRepoError:\n root_directory = os.getcwd()\n\n info += (\"Filesystem type (workspace): {fs_root}\").format(\n fs_root=self.get_fs_type(os.path.abspath(root_directory))\n )\n\n logger.info(info)\n return 0\n\n @staticmethod\n def get_fs_type(path):\n partition = {\n pathlib.Path(part.mountpoint): (part.fstype, part.device)\n for part in psutil.disk_partitions()\n }\n for parent in pathlib.Path(path).parents:\n if parent in partition:\n return partition[parent]\n return (\"unkown\", \"none\")\n\n @staticmethod\n def get_linktype_support_info(repo):\n links = {\n \"reflink\": System.reflink,\n \"hardlink\": System.hardlink,\n \"symlink\": System.symlink,\n }\n\n fname = \".\" + str(uuid.uuid4())\n src = os.path.join(repo.cache.local.cache_dir, fname)\n open(src, \"w\").close()\n dst = os.path.join(repo.root_dir, fname)\n\n cache = []\n\n for name, link in links.items():\n try:\n link(src, dst)\n os.unlink(dst)\n supported = True\n except DvcException:\n supported = False\n cache.append(\n \"{name} - {supported}\".format(\n name=name, supported=True if supported else False\n )\n )\n os.remove(src)\n\n return \", \".join(cache)\n\n\ndef add_parser(subparsers, parent_parser):\n VERSION_HELP = \"Show DVC version and system/environment informaion.\"\n\n version_parser = subparsers.add_parser(\n \"version\",\n parents=[parent_parser],\n description=append_doc_link(VERSION_HELP, \"version\"),\n help=VERSION_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n version_parser.set_defaults(func=CmdVersion)\n", "path": "dvc/command/version.py"}]} | 3,477 | 286 |
gh_patches_debug_44788 | rasdani/github-patches | git_diff | aws__aws-cli-2537 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Reading/writing EMR key_pair_file configuration options behaves oddly
Version:
```
$ aws --version
aws-cli/1.11.75 Python/2.7.10 Darwin/15.6.0 botocore/1.5.38
```
[It's suggested that one can set a default key_pair_file argument here](https://github.com/aws/aws-cli/blob/master/awscli/customizations/emr/ssh.py#L25) by running `aws configure set emr.key_pair_file <value>`
By that token, I would expect `aws configure get emr.key_pair_file` to retrieve this item and to exit with an exit code of 0.
```
$ aws configure set emr.key_pair_file /tmp/foo
$ cat config
[default]
emr =
key_pair_file = /tmp/foo
$ aws configure get emr.key_pair_file
$ echo $?
1
```
As you can see, setting this and trying to retrieve it exits with a non-zero exit code, which makes it a pain to check for this config item being set as part of shell scripts prior to running other EMR-based commands (such as create-cluster).
As an aside, trying to get the top level `emr` config item fails too;
```
$ aws configure get emr
expected a character buffer object
```
Additionally this item doesn't show up when `aws configure list` is run either;
```
$ aws configure list
Name Value Type Location
---- ----- ---- --------
profile <not set> None None
access_key REDACTED shared-credentials-file
secret_key REDACTED shared-credentials-file
region <not set> None None
```
</issue>
<code>
[start of awscli/customizations/configure/get.py]
1 # Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13 import sys
14
15 from awscli.customizations.commands import BasicCommand
16
17 from . import PREDEFINED_SECTION_NAMES
18
19
20 class ConfigureGetCommand(BasicCommand):
21 NAME = 'get'
22 DESCRIPTION = BasicCommand.FROM_FILE('configure', 'get',
23 '_description.rst')
24 SYNOPSIS = ('aws configure get varname [--profile profile-name]')
25 EXAMPLES = BasicCommand.FROM_FILE('configure', 'get', '_examples.rst')
26 ARG_TABLE = [
27 {'name': 'varname',
28 'help_text': 'The name of the config value to retrieve.',
29 'action': 'store',
30 'cli_type_name': 'string', 'positional_arg': True},
31 ]
32
33 def __init__(self, session, stream=sys.stdout):
34 super(ConfigureGetCommand, self).__init__(session)
35 self._stream = stream
36
37 def _run_main(self, args, parsed_globals):
38 varname = args.varname
39 value = None
40 if '.' not in varname:
41 # get_scoped_config() returns the config variables in the config
42 # file (not the logical_var names), which is what we want.
43 config = self._session.get_scoped_config()
44 value = config.get(varname)
45 else:
46 value = self._get_dotted_config_value(varname)
47 if value is not None:
48 self._stream.write(value)
49 self._stream.write('\n')
50 return 0
51 else:
52 return 1
53
54 def _get_dotted_config_value(self, varname):
55 parts = varname.split('.')
56 num_dots = varname.count('.')
57 # Logic to deal with predefined sections like [preview], [plugin] and etc.
58 if num_dots == 1 and parts[0] in PREDEFINED_SECTION_NAMES:
59 full_config = self._session.full_config
60 section, config_name = varname.split('.')
61 value = full_config.get(section, {}).get(config_name)
62 if value is None:
63 # Try to retrieve it from the profile config.
64 value = full_config['profiles'].get(
65 section, {}).get(config_name)
66 return value
67 if parts[0] == 'profile':
68 profile_name = parts[1]
69 config_name = parts[2]
70 remaining = parts[3:]
71 # Check if varname starts with 'default' profile (e.g. default.emr-dev.emr.instance_profile)
72 # If not, go further to check if varname starts with a known profile name
73 elif parts[0] == 'default' or (parts[0] in self._session.full_config['profiles']):
74 profile_name = parts[0]
75 config_name = parts[1]
76 remaining = parts[2:]
77 else:
78 profile_name = self._session.get_config_variable('profile')
79 config_name = parts[0]
80 remaining = parts[1:]
81
82 value = self._session.full_config['profiles'].get(
83 profile_name, {}).get(config_name)
84 if len(remaining) == 1:
85 try:
86 value = value.get(remaining[-1])
87 except AttributeError:
88 value = None
89 return value
90
[end of awscli/customizations/configure/get.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/awscli/customizations/configure/get.py b/awscli/customizations/configure/get.py
--- a/awscli/customizations/configure/get.py
+++ b/awscli/customizations/configure/get.py
@@ -11,17 +11,21 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import sys
+import logging
from awscli.customizations.commands import BasicCommand
+from awscli.compat import six
from . import PREDEFINED_SECTION_NAMES
+LOG = logging.getLogger(__name__)
+
class ConfigureGetCommand(BasicCommand):
NAME = 'get'
DESCRIPTION = BasicCommand.FROM_FILE('configure', 'get',
'_description.rst')
- SYNOPSIS = ('aws configure get varname [--profile profile-name]')
+ SYNOPSIS = 'aws configure get varname [--profile profile-name]'
EXAMPLES = BasicCommand.FROM_FILE('configure', 'get', '_examples.rst')
ARG_TABLE = [
{'name': 'varname',
@@ -30,13 +34,14 @@
'cli_type_name': 'string', 'positional_arg': True},
]
- def __init__(self, session, stream=sys.stdout):
+ def __init__(self, session, stream=sys.stdout, error_stream=sys.stderr):
super(ConfigureGetCommand, self).__init__(session)
self._stream = stream
+ self._error_stream = error_stream
def _run_main(self, args, parsed_globals):
varname = args.varname
- value = None
+
if '.' not in varname:
# get_scoped_config() returns the config variables in the config
# file (not the logical_var names), which is what we want.
@@ -44,17 +49,30 @@
value = config.get(varname)
else:
value = self._get_dotted_config_value(varname)
- if value is not None:
+
+ LOG.debug(u'Config value retrieved: %s' % value)
+
+ if isinstance(value, six.string_types):
self._stream.write(value)
self._stream.write('\n')
return 0
+ elif isinstance(value, dict):
+ # TODO: add support for this. We would need to print it off in
+ # the same format as the config file.
+ self._error_stream.write(
+ 'varname (%s) must reference a value, not a section or '
+ 'sub-section.' % varname
+ )
+ return 1
else:
return 1
def _get_dotted_config_value(self, varname):
parts = varname.split('.')
num_dots = varname.count('.')
- # Logic to deal with predefined sections like [preview], [plugin] and etc.
+
+ # Logic to deal with predefined sections like [preview], [plugin] and
+ # etc.
if num_dots == 1 and parts[0] in PREDEFINED_SECTION_NAMES:
full_config = self._session.full_config
section, config_name = varname.split('.')
@@ -64,18 +82,23 @@
value = full_config['profiles'].get(
section, {}).get(config_name)
return value
+
if parts[0] == 'profile':
profile_name = parts[1]
config_name = parts[2]
remaining = parts[3:]
- # Check if varname starts with 'default' profile (e.g. default.emr-dev.emr.instance_profile)
- # If not, go further to check if varname starts with a known profile name
- elif parts[0] == 'default' or (parts[0] in self._session.full_config['profiles']):
+ # Check if varname starts with 'default' profile (e.g.
+ # default.emr-dev.emr.instance_profile) If not, go further to check
+ # if varname starts with a known profile name
+ elif parts[0] == 'default' or (
+ parts[0] in self._session.full_config['profiles']):
profile_name = parts[0]
config_name = parts[1]
remaining = parts[2:]
else:
profile_name = self._session.get_config_variable('profile')
+ if profile_name is None:
+ profile_name = 'default'
config_name = parts[0]
remaining = parts[1:]
| {"golden_diff": "diff --git a/awscli/customizations/configure/get.py b/awscli/customizations/configure/get.py\n--- a/awscli/customizations/configure/get.py\n+++ b/awscli/customizations/configure/get.py\n@@ -11,17 +11,21 @@\n # ANY KIND, either express or implied. See the License for the specific\n # language governing permissions and limitations under the License.\n import sys\n+import logging\n \n from awscli.customizations.commands import BasicCommand\n+from awscli.compat import six\n \n from . import PREDEFINED_SECTION_NAMES\n \n+LOG = logging.getLogger(__name__)\n+\n \n class ConfigureGetCommand(BasicCommand):\n NAME = 'get'\n DESCRIPTION = BasicCommand.FROM_FILE('configure', 'get',\n '_description.rst')\n- SYNOPSIS = ('aws configure get varname [--profile profile-name]')\n+ SYNOPSIS = 'aws configure get varname [--profile profile-name]'\n EXAMPLES = BasicCommand.FROM_FILE('configure', 'get', '_examples.rst')\n ARG_TABLE = [\n {'name': 'varname',\n@@ -30,13 +34,14 @@\n 'cli_type_name': 'string', 'positional_arg': True},\n ]\n \n- def __init__(self, session, stream=sys.stdout):\n+ def __init__(self, session, stream=sys.stdout, error_stream=sys.stderr):\n super(ConfigureGetCommand, self).__init__(session)\n self._stream = stream\n+ self._error_stream = error_stream\n \n def _run_main(self, args, parsed_globals):\n varname = args.varname\n- value = None\n+\n if '.' not in varname:\n # get_scoped_config() returns the config variables in the config\n # file (not the logical_var names), which is what we want.\n@@ -44,17 +49,30 @@\n value = config.get(varname)\n else:\n value = self._get_dotted_config_value(varname)\n- if value is not None:\n+\n+ LOG.debug(u'Config value retrieved: %s' % value)\n+\n+ if isinstance(value, six.string_types):\n self._stream.write(value)\n self._stream.write('\\n')\n return 0\n+ elif isinstance(value, dict):\n+ # TODO: add support for this. We would need to print it off in\n+ # the same format as the config file.\n+ self._error_stream.write(\n+ 'varname (%s) must reference a value, not a section or '\n+ 'sub-section.' % varname\n+ )\n+ return 1\n else:\n return 1\n \n def _get_dotted_config_value(self, varname):\n parts = varname.split('.')\n num_dots = varname.count('.')\n- # Logic to deal with predefined sections like [preview], [plugin] and etc.\n+\n+ # Logic to deal with predefined sections like [preview], [plugin] and\n+ # etc.\n if num_dots == 1 and parts[0] in PREDEFINED_SECTION_NAMES:\n full_config = self._session.full_config\n section, config_name = varname.split('.')\n@@ -64,18 +82,23 @@\n value = full_config['profiles'].get(\n section, {}).get(config_name)\n return value\n+\n if parts[0] == 'profile':\n profile_name = parts[1]\n config_name = parts[2]\n remaining = parts[3:]\n- # Check if varname starts with 'default' profile (e.g. 
default.emr-dev.emr.instance_profile)\n- # If not, go further to check if varname starts with a known profile name\n- elif parts[0] == 'default' or (parts[0] in self._session.full_config['profiles']):\n+ # Check if varname starts with 'default' profile (e.g.\n+ # default.emr-dev.emr.instance_profile) If not, go further to check\n+ # if varname starts with a known profile name\n+ elif parts[0] == 'default' or (\n+ parts[0] in self._session.full_config['profiles']):\n profile_name = parts[0]\n config_name = parts[1]\n remaining = parts[2:]\n else:\n profile_name = self._session.get_config_variable('profile')\n+ if profile_name is None:\n+ profile_name = 'default'\n config_name = parts[0]\n remaining = parts[1:]\n", "issue": "Reading/writing EMR key_pair_file configuration options behaves oddly\nVersion:\r\n\r\n```\r\n$ aws --version\r\naws-cli/1.11.75 Python/2.7.10 Darwin/15.6.0 botocore/1.5.38\r\n```\r\n\r\n[It's suggested that one can set a default key_pair_file argument here](https://github.com/aws/aws-cli/blob/master/awscli/customizations/emr/ssh.py#L25) by running `aws configure set emr.key_pair_file <value>`\r\n\r\nBy that token, I would expect `aws configure get emr.key_pair_file` to retrieve this item and to exit with a exit code of 0.\r\n\r\n```\r\n$ aws configure set emr.key_pair_file /tmp/foo\r\n\r\n$ cat config\r\n[default]\r\nemr =\r\n key_pair_file = /tmp/foo\r\n\r\n$ aws configure get emr.key_pair_file\r\n\r\n$ echo $?\r\n1\r\n```\r\n\r\nAs you can see, setting this and trying to retrieve it exits with a non-zero exit code which makes it a pain to check for this config item being set as part of shell scripts prior to do other EMR-based commands (such as create-cluster).\r\n\r\nAs an aside, trying to get the top level `emr` config item fails too;\r\n\r\n```\r\n$ aws configure get emr\r\n\r\nexpected a character buffer object\r\n```\r\n\r\nAdditionally this item doesn't show up when `aws configure list` is run either;\r\n\r\n```\r\n$ aws configure list\r\n Name Value Type Location\r\n ---- ----- ---- --------\r\n profile <not set> None None\r\naccess_key REDACTED shared-credentials-file\r\nsecret_key REDACTED shared-credentials-file\r\n region <not set> None None\r\n\r\n```\r\n\n", "before_files": [{"content": "# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\nimport sys\n\nfrom awscli.customizations.commands import BasicCommand\n\nfrom . 
import PREDEFINED_SECTION_NAMES\n\n\nclass ConfigureGetCommand(BasicCommand):\n NAME = 'get'\n DESCRIPTION = BasicCommand.FROM_FILE('configure', 'get',\n '_description.rst')\n SYNOPSIS = ('aws configure get varname [--profile profile-name]')\n EXAMPLES = BasicCommand.FROM_FILE('configure', 'get', '_examples.rst')\n ARG_TABLE = [\n {'name': 'varname',\n 'help_text': 'The name of the config value to retrieve.',\n 'action': 'store',\n 'cli_type_name': 'string', 'positional_arg': True},\n ]\n\n def __init__(self, session, stream=sys.stdout):\n super(ConfigureGetCommand, self).__init__(session)\n self._stream = stream\n\n def _run_main(self, args, parsed_globals):\n varname = args.varname\n value = None\n if '.' not in varname:\n # get_scoped_config() returns the config variables in the config\n # file (not the logical_var names), which is what we want.\n config = self._session.get_scoped_config()\n value = config.get(varname)\n else:\n value = self._get_dotted_config_value(varname)\n if value is not None:\n self._stream.write(value)\n self._stream.write('\\n')\n return 0\n else:\n return 1\n\n def _get_dotted_config_value(self, varname):\n parts = varname.split('.')\n num_dots = varname.count('.')\n # Logic to deal with predefined sections like [preview], [plugin] and etc.\n if num_dots == 1 and parts[0] in PREDEFINED_SECTION_NAMES:\n full_config = self._session.full_config\n section, config_name = varname.split('.')\n value = full_config.get(section, {}).get(config_name)\n if value is None:\n # Try to retrieve it from the profile config.\n value = full_config['profiles'].get(\n section, {}).get(config_name)\n return value\n if parts[0] == 'profile':\n profile_name = parts[1]\n config_name = parts[2]\n remaining = parts[3:]\n # Check if varname starts with 'default' profile (e.g. default.emr-dev.emr.instance_profile)\n # If not, go further to check if varname starts with a known profile name\n elif parts[0] == 'default' or (parts[0] in self._session.full_config['profiles']):\n profile_name = parts[0]\n config_name = parts[1]\n remaining = parts[2:]\n else:\n profile_name = self._session.get_config_variable('profile')\n config_name = parts[0]\n remaining = parts[1:]\n\n value = self._session.full_config['profiles'].get(\n profile_name, {}).get(config_name)\n if len(remaining) == 1:\n try:\n value = value.get(remaining[-1])\n except AttributeError:\n value = None\n return value\n", "path": "awscli/customizations/configure/get.py"}]} | 1,898 | 974 |
gh_patches_debug_24329 | rasdani/github-patches | git_diff | pulp__pulpcore-259 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
file:// sync deletes files from directory
**Version**
Pulpcore 3.39
**Describe the bug**
When syncing file:// repositories, files are disappearing after the sync.
**To Reproduce**
1) Copy these two repositories to the FS:
- https://github.com/Katello/katello/tree/master/test/fixtures/test_repos/file1
- https://github.com/Katello/katello/tree/master/test/fixtures/test_repos/file2
2) Sync one, then the other
3) See that some files disappeared.
- In my case, file2 lost every file except PULP_MANIFEST
**Expected behavior**
No files disappear.
**Additional context**
This also occurred with RPM content type files.
</issue>
<code>
[start of pulpcore/app/models/base.py]
1 import uuid
2
3 from django.db import models
4 from django.db.models import options
5
6
7 class Model(models.Model):
8 """Base model class for all Pulp models.
9
10 Fields:
11 _created (models.DateTimeField): Created timestamp UTC.
12 _last_updated (models.DateTimeField): Last updated timestamp UTC.
13
14 References:
15
16 * https://docs.djangoproject.com/en/1.8/topics/db/models/#automatic-primary-key-fields
17
18 """
19 _id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
20 _created = models.DateTimeField(auto_now_add=True)
21 _last_updated = models.DateTimeField(auto_now=True, null=True)
22
23 class Meta:
24 abstract = True
25
26 def __str__(self):
27 try:
28 # if we have a name, use it
29 return '<{}: {}>'.format(self._meta.object_name, self.name)
30 except AttributeError:
31 # if we don't, use the pk
32 return '<{}: pk={}>'.format(self._meta.object_name, self.pk)
33
34 def __repr__(self):
35 return str(self)
36
37
38 class MasterModel(Model):
39 """Base model for the "Master" model in a "Master-Detail" relationship.
40
41 Provides methods for casting down to detail types, back up to the master type,
42 as well as a model field for tracking the type.
43
44 Attributes:
45
46 TYPE (str): Default constant value saved into the ``_type``
47 field of Model instances
48
49 Fields:
50
51 _type: The user-facing string identifying the detail type of this model
52
53 Warning:
54 Subclasses of this class rely on there being no other parent/child Model
55 relationships than the Master/Detail relationship. All subclasses must use
56 only abstract Model base classes for MasterModel to behave properly.
57 Specifically, OneToOneField relationships must not be used in any MasterModel
58 subclass.
59
60 """
61
62 # TYPE is the user-facing string that describes this type. It is used to construct API
63 # endpoints for Detail models, and will be seen in the URLs generated for those Detail models.
64 # It can also be used for filtering across a relation where a model is related to a Master
65 # model. Set this to something reasonable in Master and Detail model classes, e.g. when
66 # create a master model, like "Remote", its TYPE value could be "remote". Then, when
67 # creating a Remote Detail class like PackageRemote, its _type value could be "package",
68 # not "package_remote", since "package_remote" would be redundant in the context of
69 # a remote Master model.
70 TYPE = None
71
72 # This field must have a value when models are saved, and defaults to the value of
73 # the TYPE attribute on the Model being saved (seen above).
74 _type = models.TextField(null=False, default=None)
75
76 class Meta:
77 abstract = True
78
79 def save(self, *args, **kwargs):
80 # instances of "detail" models that subclass MasterModel are exposed
81 # on instances of MasterModel by the string stored in that model's TYPE attr.
82 # Storing this _type in a column on the MasterModel next to makes it trivial
83 # to filter for specific detail model types across master's relations.
84 # Prepend the TYPE defined on a detail model with a django app label.
85 # If a plugin sets the type field themselves, it's used as-is.
86 if not self._type:
87 self._type = '{app_label}.{type}'.format(app_label=self._meta.app_label,
88 type=self.TYPE)
89 return super().save(*args, **kwargs)
90
91 def cast(self):
92 """Return a "Detail" model instance of this master-detail pair.
93
94 If this model is already an instance of its detail type, it will return itself.
95 """
96 # Go through our related objects, find the one that's a subclass of this model
97 # on a OneToOneField, which identifies it as a potential detail relation.
98 for rel in self._meta.related_objects:
99 if rel.one_to_one and issubclass(rel.related_model, self._meta.model):
100 # The name of this relation is the name of the attr on the model instance.
101 # If that attr as a value, that means a row exists for this model in the
102 # related detail table. Cast and return this value, recursively following
103 # master/detail relationships down to the last table (the most detailed).
104 try:
105 return getattr(self, rel.name).cast()
106 except AttributeError:
107 continue
108 else:
109 # The for loop exited normally, there are no more detailed models than this
110 # one in this instance's master/detail ancestry, so return here.
111 return self
112
113 @property
114 def master(self):
115 """The "Master" model instance of this master-detail pair
116
117 If this is already the master model instance, it will return itself.
118 """
119 if self._meta.master_model:
120 return self._meta.master_model(pk=self.pk)
121 else:
122 return self
123
124 def __str__(self):
125 # similar to Model's __str__, but type-aware
126 cast = self.cast()
127 if cast is self:
128 return super().__str__()
129
130 try:
131 return '<{} (_type={}): {}>'.format(self._meta.object_name, cast.TYPE, cast.name)
132 except AttributeError:
133 return '<{} (_type={}): pk={}>'.format(self._meta.object_name, cast.TYPE, cast.pk)
134
135
136 # Add properties to model _meta info to support master/detail models
137 # If this property is not None on a Model, then that Model is a Detail Model.
138 # Doing this in a non-monkeypatch way would mean a lot of effort to achieve the same result
139 # (e.g. custom model metaclass, custom Options implementation, etc). These could be classmethods
140 # on Model classes, but it's easy enough to use the model's _meta namespace to do this, since
141 # that's where other methods like this exist in Django.
142 def master_model(options):
143 """
144 The Master model class of this Model's Master/Detail relationship.
145
146 Accessible at ``<model_class>._meta.master_model``, the Master model class in a Master/Detail
147 relationship is the most generic non-abstract Model in this model's multiple-table chain
148 of inheritance.
149
150 If this model is not a detail model, None will be returned.
151 """
152 # If this isn't even a MasterModel descendant, don't bother.
153 if not issubclass(options.model, MasterModel):
154 return None
155 try:
156 # The last item in this list is the oldest ancestor. Since the MasterModel usage
157 # is to declare your master by subclassing MasterModel, and MasterModel is abstract,
158 # the oldest ancestor model is the Master Model.
159 return options.get_parent_list()[-1]
160 except IndexError:
161 # Also None if this model is itself the master.
162 return None
163
164
165 options.Options.master_model = property(master_model)
166
[end of pulpcore/app/models/base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pulpcore/app/models/base.py b/pulpcore/app/models/base.py
--- a/pulpcore/app/models/base.py
+++ b/pulpcore/app/models/base.py
@@ -1,7 +1,9 @@
+from gettext import gettext as _
import uuid
from django.db import models
from django.db.models import options
+from django.db.models.base import ModelBase
class Model(models.Model):
@@ -35,7 +37,24 @@
return str(self)
-class MasterModel(Model):
+class MasterModelMeta(ModelBase):
+ def __new__(cls, name, bases, attrs, **kwargs):
+ """Override __new__ to set the default_related_name."""
+ if Model not in bases and MasterModel not in bases: # Only affects "Detail" models.
+ meta = attrs.get("Meta")
+ default_related_name = getattr(
+ meta, "default_related_name", None)
+ abstract = getattr(meta, "abstract", None)
+
+ if not default_related_name and not abstract:
+ raise Exception(_("The 'default_related_name' option has not been set for "
+ "{class_name}").format(class_name=name))
+
+ new_class = super().__new__(cls, name, bases, attrs, **kwargs)
+ return new_class
+
+
+class MasterModel(Model, metaclass=MasterModelMeta):
"""Base model for the "Master" model in a "Master-Detail" relationship.
Provides methods for casting down to detail types, back up to the master type,
| {"golden_diff": "diff --git a/pulpcore/app/models/base.py b/pulpcore/app/models/base.py\n--- a/pulpcore/app/models/base.py\n+++ b/pulpcore/app/models/base.py\n@@ -1,7 +1,9 @@\n+from gettext import gettext as _\n import uuid\n \n from django.db import models\n from django.db.models import options\n+from django.db.models.base import ModelBase\n \n \n class Model(models.Model):\n@@ -35,7 +37,24 @@\n return str(self)\n \n \n-class MasterModel(Model):\n+class MasterModelMeta(ModelBase):\n+ def __new__(cls, name, bases, attrs, **kwargs):\n+ \"\"\"Override __new__ to set the default_related_name.\"\"\"\n+ if Model not in bases and MasterModel not in bases: # Only affects \"Detail\" models.\n+ meta = attrs.get(\"Meta\")\n+ default_related_name = getattr(\n+ meta, \"default_related_name\", None)\n+ abstract = getattr(meta, \"abstract\", None)\n+\n+ if not default_related_name and not abstract:\n+ raise Exception(_(\"The 'default_related_name' option has not been set for \"\n+ \"{class_name}\").format(class_name=name))\n+\n+ new_class = super().__new__(cls, name, bases, attrs, **kwargs)\n+ return new_class\n+\n+\n+class MasterModel(Model, metaclass=MasterModelMeta):\n \"\"\"Base model for the \"Master\" model in a \"Master-Detail\" relationship.\n \n Provides methods for casting down to detail types, back up to the master type,\n", "issue": "file:// sync deletes files from directory\n**Version**\r\nPulpcore 3.39\r\n\r\n**Describe the bug**\r\nWhen syncing file:// repositories, files are disappearing after the sync.\r\n\r\n**To Reproduce**\r\n1) Copy these two repositories to the FS:\r\n - https://github.com/Katello/katello/tree/master/test/fixtures/test_repos/file1\r\n - https://github.com/Katello/katello/tree/master/test/fixtures/test_repos/file2\r\n2) Sync one, then the other\r\n3) See that some files disappeared.\r\n - In my case, file2 lost every file except PULP_MANIFEST\r\n\r\n\r\n**Expected behavior**\r\nNo files disappear.\r\n\r\n**Additional context**\r\nThis also occurred with RPM content type files.\r\n\n", "before_files": [{"content": "import uuid\n\nfrom django.db import models\nfrom django.db.models import options\n\n\nclass Model(models.Model):\n \"\"\"Base model class for all Pulp models.\n\n Fields:\n _created (models.DateTimeField): Created timestamp UTC.\n _last_updated (models.DateTimeField): Last updated timestamp UTC.\n\n References:\n\n * https://docs.djangoproject.com/en/1.8/topics/db/models/#automatic-primary-key-fields\n\n \"\"\"\n _id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)\n _created = models.DateTimeField(auto_now_add=True)\n _last_updated = models.DateTimeField(auto_now=True, null=True)\n\n class Meta:\n abstract = True\n\n def __str__(self):\n try:\n # if we have a name, use it\n return '<{}: {}>'.format(self._meta.object_name, self.name)\n except AttributeError:\n # if we don't, use the pk\n return '<{}: pk={}>'.format(self._meta.object_name, self.pk)\n\n def __repr__(self):\n return str(self)\n\n\nclass MasterModel(Model):\n \"\"\"Base model for the \"Master\" model in a \"Master-Detail\" relationship.\n\n Provides methods for casting down to detail types, back up to the master type,\n as well as a model field for tracking the type.\n\n Attributes:\n\n TYPE (str): Default constant value saved into the ``_type``\n field of Model instances\n\n Fields:\n\n _type: The user-facing string identifying the detail type of this model\n\n Warning:\n Subclasses of this class rely on there being no other parent/child Model\n relationships than 
the Master/Detail relationship. All subclasses must use\n only abstract Model base classes for MasterModel to behave properly.\n Specifically, OneToOneField relationships must not be used in any MasterModel\n subclass.\n\n \"\"\"\n\n # TYPE is the user-facing string that describes this type. It is used to construct API\n # endpoints for Detail models, and will be seen in the URLs generated for those Detail models.\n # It can also be used for filtering across a relation where a model is related to a Master\n # model. Set this to something reasonable in Master and Detail model classes, e.g. when\n # create a master model, like \"Remote\", its TYPE value could be \"remote\". Then, when\n # creating a Remote Detail class like PackageRemote, its _type value could be \"package\",\n # not \"package_remote\", since \"package_remote\" would be redundant in the context of\n # a remote Master model.\n TYPE = None\n\n # This field must have a value when models are saved, and defaults to the value of\n # the TYPE attribute on the Model being saved (seen above).\n _type = models.TextField(null=False, default=None)\n\n class Meta:\n abstract = True\n\n def save(self, *args, **kwargs):\n # instances of \"detail\" models that subclass MasterModel are exposed\n # on instances of MasterModel by the string stored in that model's TYPE attr.\n # Storing this _type in a column on the MasterModel next to makes it trivial\n # to filter for specific detail model types across master's relations.\n # Prepend the TYPE defined on a detail model with a django app label.\n # If a plugin sets the type field themselves, it's used as-is.\n if not self._type:\n self._type = '{app_label}.{type}'.format(app_label=self._meta.app_label,\n type=self.TYPE)\n return super().save(*args, **kwargs)\n\n def cast(self):\n \"\"\"Return a \"Detail\" model instance of this master-detail pair.\n\n If this model is already an instance of its detail type, it will return itself.\n \"\"\"\n # Go through our related objects, find the one that's a subclass of this model\n # on a OneToOneField, which identifies it as a potential detail relation.\n for rel in self._meta.related_objects:\n if rel.one_to_one and issubclass(rel.related_model, self._meta.model):\n # The name of this relation is the name of the attr on the model instance.\n # If that attr as a value, that means a row exists for this model in the\n # related detail table. 
Cast and return this value, recursively following\n # master/detail relationships down to the last table (the most detailed).\n try:\n return getattr(self, rel.name).cast()\n except AttributeError:\n continue\n else:\n # The for loop exited normally, there are no more detailed models than this\n # one in this instance's master/detail ancestry, so return here.\n return self\n\n @property\n def master(self):\n \"\"\"The \"Master\" model instance of this master-detail pair\n\n If this is already the master model instance, it will return itself.\n \"\"\"\n if self._meta.master_model:\n return self._meta.master_model(pk=self.pk)\n else:\n return self\n\n def __str__(self):\n # similar to Model's __str__, but type-aware\n cast = self.cast()\n if cast is self:\n return super().__str__()\n\n try:\n return '<{} (_type={}): {}>'.format(self._meta.object_name, cast.TYPE, cast.name)\n except AttributeError:\n return '<{} (_type={}): pk={}>'.format(self._meta.object_name, cast.TYPE, cast.pk)\n\n\n# Add properties to model _meta info to support master/detail models\n# If this property is not None on a Model, then that Model is a Detail Model.\n# Doing this in a non-monkeypatch way would mean a lot of effort to achieve the same result\n# (e.g. custom model metaclass, custom Options implementation, etc). These could be classmethods\n# on Model classes, but it's easy enough to use the model's _meta namespace to do this, since\n# that's where other methods like this exist in Django.\ndef master_model(options):\n \"\"\"\n The Master model class of this Model's Master/Detail relationship.\n\n Accessible at ``<model_class>._meta.master_model``, the Master model class in a Master/Detail\n relationship is the most generic non-abstract Model in this model's multiple-table chain\n of inheritance.\n\n If this model is not a detail model, None will be returned.\n \"\"\"\n # If this isn't even a MasterModel descendant, don't bother.\n if not issubclass(options.model, MasterModel):\n return None\n try:\n # The last item in this list is the oldest ancestor. Since the MasterModel usage\n # is to declare your master by subclassing MasterModel, and MasterModel is abstract,\n # the oldest ancestor model is the Master Model.\n return options.get_parent_list()[-1]\n except IndexError:\n # Also None if this model is itself the master.\n return None\n\n\noptions.Options.master_model = property(master_model)\n", "path": "pulpcore/app/models/base.py"}]} | 2,580 | 336 |
gh_patches_debug_30466 | rasdani/github-patches | git_diff | vaexio__vaex-1150 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG-REPORT] TypeError: can't pickle vaex.superutils.ordered_set
**Description**
If I use `df.func.where` with `isin`, I can't pickle the resulting state.
This is for machine learning pipelines.
reproduce:
```
import vaex
import pickle
df = vaex.from_dict({'a':[1,2,3]})
df['b'] = df.func.where(df['a'].isin([1]),1,2)
pickle.dumps(df.state_get())
...
TypeError: can't pickle vaex.superutils.ordered_set_int64 objects
```
**Software information**
- vaex-core==4.0.0a11
- Vaex was installed via: pip
- OS: Mac
</issue>
<code>
[start of packages/vaex-core/vaex/hash.py]
1 import os
2
3
4 on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
5 if not on_rtd:
6 from .superutils import *
7 from . import superutils
8 ordered_set = tuple([cls for name, cls in vars(superutils).items() if name.startswith('ordered_set')])
9
10
11 def counter_type_from_dtype(dtype, transient=True):
12 from .array_types import is_string_type
13 if is_string_type(dtype):
14 if transient:
15 postfix = 'string'
16 else:
17 postfix = 'string' # view not support atm
18 else:
19 postfix = str(dtype)
20 if postfix == '>f8':
21 postfix = 'float64'
22 if postfix == 'double': # arrow
23 postfix = 'float64'
24 name = 'counter_' + postfix
25 return globals()[name]
26
27 def ordered_set_type_from_dtype(dtype, transient=True):
28 from .array_types import is_string_type
29 if is_string_type(dtype):
30 if transient:
31 postfix = 'string'
32 else:
33 postfix = 'string' # not support atm
34 else:
35 postfix = str(dtype)
36 if postfix == '>f8':
37 postfix = 'float64'
38 name = 'ordered_set_' + postfix
39 return globals()[name]
40
41 def index_type_from_dtype(dtype, transient=True, prime_growth=False):
42 from .array_types import is_string_type
43 if is_string_type(dtype):
44 if transient:
45 postfix = 'string'
46 else:
47 postfix = 'string' # not support atm
48 else:
49 postfix = str(dtype)
50 if postfix == '>f8':
51 postfix = 'float64'
52 name = 'index_hash_' + postfix
53 if prime_growth:
54 name += "_prime_growth"
55 return globals()[name]
56
57 # from numpy import *
58 # import IPython
59 # IPython.embed()
[end of packages/vaex-core/vaex/hash.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/packages/vaex-core/vaex/hash.py b/packages/vaex-core/vaex/hash.py
--- a/packages/vaex-core/vaex/hash.py
+++ b/packages/vaex-core/vaex/hash.py
@@ -1,12 +1,19 @@
import os
+import copyreg
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:
from .superutils import *
from . import superutils
+
ordered_set = tuple([cls for name, cls in vars(superutils).items() if name.startswith('ordered_set')])
+ def pickle(x):
+ return type(x), (x.extract(), x.count, x.nan_count, x.null_count)
+ for cls in ordered_set:
+ copyreg.pickle(cls, pickle)
+
def counter_type_from_dtype(dtype, transient=True):
from .array_types import is_string_type
@@ -24,6 +31,7 @@
name = 'counter_' + postfix
return globals()[name]
+
def ordered_set_type_from_dtype(dtype, transient=True):
from .array_types import is_string_type
if is_string_type(dtype):
@@ -38,6 +46,7 @@
name = 'ordered_set_' + postfix
return globals()[name]
+
def index_type_from_dtype(dtype, transient=True, prime_growth=False):
from .array_types import is_string_type
if is_string_type(dtype):
@@ -53,7 +62,3 @@
if prime_growth:
name += "_prime_growth"
return globals()[name]
-
-# from numpy import *
-# import IPython
-# IPython.embed()
\ No newline at end of file
| {"golden_diff": "diff --git a/packages/vaex-core/vaex/hash.py b/packages/vaex-core/vaex/hash.py\n--- a/packages/vaex-core/vaex/hash.py\n+++ b/packages/vaex-core/vaex/hash.py\n@@ -1,12 +1,19 @@\n import os\n+import copyreg\n \n \n on_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n if not on_rtd:\n from .superutils import *\n from . import superutils\n+\n ordered_set = tuple([cls for name, cls in vars(superutils).items() if name.startswith('ordered_set')])\n \n+ def pickle(x):\n+ return type(x), (x.extract(), x.count, x.nan_count, x.null_count)\n+ for cls in ordered_set:\n+ copyreg.pickle(cls, pickle)\n+\n \n def counter_type_from_dtype(dtype, transient=True):\n from .array_types import is_string_type\n@@ -24,6 +31,7 @@\n name = 'counter_' + postfix\n return globals()[name]\n \n+\n def ordered_set_type_from_dtype(dtype, transient=True):\n from .array_types import is_string_type\n if is_string_type(dtype):\n@@ -38,6 +46,7 @@\n name = 'ordered_set_' + postfix\n return globals()[name]\n \n+\n def index_type_from_dtype(dtype, transient=True, prime_growth=False):\n from .array_types import is_string_type\n if is_string_type(dtype):\n@@ -53,7 +62,3 @@\n if prime_growth:\n name += \"_prime_growth\"\n return globals()[name]\n-\n-# from numpy import *\n-# import IPython\n-# IPython.embed()\n\\ No newline at end of file\n", "issue": "[BUG-REPORT] TypeError: can't pickle vaex.superutils.ordered_set\n**Description**\r\nIf I use `df.func.where` with `isin`, I can't pickle the resulted state.\r\nThis is for machine learning pipelines. \r\n\r\nreproduce:\r\n```\r\nimport vaex\r\nimport pickle\r\n\r\ndf = vaex.from_dict({'a':[1,2,3]})\r\ndf['b'] = df.func.where(df['a'].isin([1]),1,2)\r\npickle.dumps(df.state_get())\r\n...\r\nTypeError: can't pickle vaex.superutils.ordered_set_int64 objects\r\n```\r\n\r\n\r\n**Software information**\r\n - vaex-core==4.0.0a11\r\n - Vaex was installed via: pip\r\n - OS: Mac\r\n \n", "before_files": [{"content": "import os\n\n\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\nif not on_rtd:\n from .superutils import *\n from . import superutils\n ordered_set = tuple([cls for name, cls in vars(superutils).items() if name.startswith('ordered_set')])\n\n\ndef counter_type_from_dtype(dtype, transient=True):\n from .array_types import is_string_type\n if is_string_type(dtype):\n if transient:\n postfix = 'string'\n else:\n postfix = 'string' # view not support atm\n else:\n postfix = str(dtype)\n if postfix == '>f8':\n postfix = 'float64'\n if postfix == 'double': # arrow\n postfix = 'float64'\n name = 'counter_' + postfix\n return globals()[name]\n\ndef ordered_set_type_from_dtype(dtype, transient=True):\n from .array_types import is_string_type\n if is_string_type(dtype):\n if transient:\n postfix = 'string'\n else:\n postfix = 'string' # not support atm\n else:\n postfix = str(dtype)\n if postfix == '>f8':\n postfix = 'float64'\n name = 'ordered_set_' + postfix\n return globals()[name]\n\ndef index_type_from_dtype(dtype, transient=True, prime_growth=False):\n from .array_types import is_string_type\n if is_string_type(dtype):\n if transient:\n postfix = 'string'\n else:\n postfix = 'string' # not support atm\n else:\n postfix = str(dtype)\n if postfix == '>f8':\n postfix = 'float64'\n name = 'index_hash_' + postfix\n if prime_growth:\n name += \"_prime_growth\"\n return globals()[name]\n\n# from numpy import *\n# import IPython\n# IPython.embed()", "path": "packages/vaex-core/vaex/hash.py"}]} | 1,213 | 381 |
gh_patches_debug_15466 | rasdani/github-patches | git_diff | google__turbinia-811 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
DockerContainersEnumerationTask JSON decode failure
```
DockerContainersEnumerationTask Task failed with exception: [Expecting value: line 1 column 1 (char 0)]
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/turbinia-20210423.dev2+g1f137be-py3.6.egg/turbinia/workers/__init__.py", line 895, in run_wrapper
self.result = self.run(evidence, self.result)
File "/usr/local/lib/python3.6/dist-packages/turbinia-20210423.dev2+g1f137be-py3.6.egg/turbinia/workers/docker.py", line 107, in run
containers_info = self.GetContainers(evidence)
File "/usr/local/lib/python3.6/dist-packages/turbinia-20210423.dev2+g1f137be-py3.6.egg/turbinia/workers/docker.py", line 84, in GetContainers
containers_info = json.loads(json_string)
File "/usr/lib/python3.6/json/__init__.py", line 354, in loads
return _default_decoder.decode(s)
File "/usr/lib/python3.6/json/decoder.py", line 339, in decode
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
File "/usr/lib/python3.6/json/decoder.py", line 357, in raw_decode
raise JSONDecodeError("Expecting value", s, err.value) from None
json.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)
```
e2e test -> https://plaso-ci.log2timeline.net/jenkins/view/Turbinia/job/turbinia-e2e-googleclouddisk-dev/59/
</issue>
<code>
[start of turbinia/workers/docker.py]
1 # Copyright 2015 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Task for running docker-explorer."""
15
16 from __future__ import unicode_literals
17
18 import json
19 import logging
20 import subprocess
21
22 from turbinia import TurbiniaException
23 from turbinia.evidence import DockerContainer
24 from turbinia.evidence import EvidenceState as state
25 from turbinia.lib import utils
26 from turbinia.workers import Priority
27 from turbinia.workers import TurbiniaTask
28 from turbinia.lib.docker_manager import GetDockerPath
29 from turbinia import config
30
31 log = logging.getLogger('turbinia')
32
33
34 class DockerContainersEnumerationTask(TurbiniaTask):
35 """Enumerates Docker containers on Linux"""
36
37 REQUIRED_STATES = [state.ATTACHED, state.MOUNTED]
38
39 def GetContainers(self, evidence):
40 """Lists the containers from an input Evidence.
41
42 We use subprocess to run the DockerExplorer script, instead of using the
43 Python module, because we need to make sure all DockerExplorer code runs
44 as root.
45
46 Args:
47 evidence (Evidence): the input Evidence.
48
49 Returns:
50 a list(dict) containing information about the containers found.
51
52 Raises:
53 TurbiniaException: when the docker-explorer tool cannot be found or failed
54 to run.
55 """
56 config.LoadConfig()
57 docker_dir = GetDockerPath(evidence.mount_path)
58
59 containers_info = None
60
61 # TODO(rgayon): use docker-explorer exposed constant when
62 # https://github.com/google/docker-explorer/issues/80 is in.
63 de_binary = utils.get_exe_path('de.py')
64 if not de_binary:
65 raise TurbiniaException('Cannot find de.py in path')
66
67 docker_explorer_command = ['sudo', de_binary]
68
69 if config.DEBUG_TASKS or evidence.config.get('debug_tasks'):
70 docker_explorer_command.append('-d')
71
72 docker_explorer_command.extend(['-r', docker_dir, 'list', 'all_containers'])
73
74 log.info('Running {0:s}'.format(' '.join(docker_explorer_command)))
75 try:
76 json_string = subprocess.check_output(docker_explorer_command).decode(
77 'utf-8')
78 except json.JSONDecodeError as e:
79 raise TurbiniaException(
80 'Error decoding JSON output from de.py: {0!s}'.format(e))
81 except subprocess.CalledProcessError as e:
82 raise TurbiniaException('de.py returned an error: {0!s}'.format(e))
83
84 containers_info = json.loads(json_string)
85
86 return containers_info
87
88 def run(self, evidence, result):
89 """Run the docker-explorer tool to list containerss.
90
91 Args:
92 evidence (Evidence object): The evidence to process
93 result (TurbiniaTaskResult): The object to place task results into.
94
95 Returns:
96 TurbiniaTaskResult object.
97 """
98
99 status_report = ''
100 success = False
101
102 status_report = (
103 'Error enumerating Docker containers, evidence has no mounted '
104 'filesystem')
105 found_containers = []
106 try:
107 containers_info = self.GetContainers(evidence)
108 for container_info in containers_info:
109 container_id = container_info.get('container_id')
110 found_containers.append(container_id)
111 container_evidence = DockerContainer(container_id=container_id)
112 result.add_evidence(container_evidence, evidence.config)
113 success = True
114 status_report = 'Found {0!s} containers: {1:s}'.format(
115 len(found_containers), ' '.join(found_containers))
116 except TurbiniaException as e:
117 status_report = 'Error enumerating Docker containers: {0!s}'.format(e)
118
119 result.report_priority = Priority.LOW
120 result.report_data = status_report
121 result.close(self, success=success, status=status_report)
122 return result
123
[end of turbinia/workers/docker.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/turbinia/workers/docker.py b/turbinia/workers/docker.py
--- a/turbinia/workers/docker.py
+++ b/turbinia/workers/docker.py
@@ -75,14 +75,14 @@
try:
json_string = subprocess.check_output(docker_explorer_command).decode(
'utf-8')
+ containers_info = json.loads(json_string)
except json.JSONDecodeError as e:
raise TurbiniaException(
- 'Error decoding JSON output from de.py: {0!s}'.format(e))
+ 'Error decoding JSON output from de.py: {0!s} {1!s}'.format(
+ e, json_string))
except subprocess.CalledProcessError as e:
raise TurbiniaException('de.py returned an error: {0!s}'.format(e))
- containers_info = json.loads(json_string)
-
return containers_info
def run(self, evidence, result):
| {"golden_diff": "diff --git a/turbinia/workers/docker.py b/turbinia/workers/docker.py\n--- a/turbinia/workers/docker.py\n+++ b/turbinia/workers/docker.py\n@@ -75,14 +75,14 @@\n try:\n json_string = subprocess.check_output(docker_explorer_command).decode(\n 'utf-8')\n+ containers_info = json.loads(json_string)\n except json.JSONDecodeError as e:\n raise TurbiniaException(\n- 'Error decoding JSON output from de.py: {0!s}'.format(e))\n+ 'Error decoding JSON output from de.py: {0!s} {1!s}'.format(\n+ e, json_string))\n except subprocess.CalledProcessError as e:\n raise TurbiniaException('de.py returned an error: {0!s}'.format(e))\n \n- containers_info = json.loads(json_string)\n-\n return containers_info\n \n def run(self, evidence, result):\n", "issue": "DockerContainersEnumerationTask JSON decode failure\n```\r\nDockerContainersEnumerationTask Task failed with exception: [Expecting value: line 1 column 1 (char 0)]\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.6/dist-packages/turbinia-20210423.dev2+g1f137be-py3.6.egg/turbinia/workers/__init__.py\", line 895, in run_wrapper\r\n self.result = self.run(evidence, self.result)\r\n File \"/usr/local/lib/python3.6/dist-packages/turbinia-20210423.dev2+g1f137be-py3.6.egg/turbinia/workers/docker.py\", line 107, in run\r\n containers_info = self.GetContainers(evidence)\r\n File \"/usr/local/lib/python3.6/dist-packages/turbinia-20210423.dev2+g1f137be-py3.6.egg/turbinia/workers/docker.py\", line 84, in GetContainers\r\n containers_info = json.loads(json_string)\r\n File \"/usr/lib/python3.6/json/__init__.py\", line 354, in loads\r\n return _default_decoder.decode(s)\r\n File \"/usr/lib/python3.6/json/decoder.py\", line 339, in decode\r\n obj, end = self.raw_decode(s, idx=_w(s, 0).end())\r\n File \"/usr/lib/python3.6/json/decoder.py\", line 357, in raw_decode\r\n raise JSONDecodeError(\"Expecting value\", s, err.value) from None\r\njson.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)\r\n```\r\n\r\ne2e test -> https://plaso-ci.log2timeline.net/jenkins/view/Turbinia/job/turbinia-e2e-googleclouddisk-dev/59/\n", "before_files": [{"content": "# Copyright 2015 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Task for running docker-explorer.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport json\nimport logging\nimport subprocess\n\nfrom turbinia import TurbiniaException\nfrom turbinia.evidence import DockerContainer\nfrom turbinia.evidence import EvidenceState as state\nfrom turbinia.lib import utils\nfrom turbinia.workers import Priority\nfrom turbinia.workers import TurbiniaTask\nfrom turbinia.lib.docker_manager import GetDockerPath\nfrom turbinia import config\n\nlog = logging.getLogger('turbinia')\n\n\nclass DockerContainersEnumerationTask(TurbiniaTask):\n \"\"\"Enumerates Docker containers on Linux\"\"\"\n\n REQUIRED_STATES = [state.ATTACHED, state.MOUNTED]\n\n def GetContainers(self, evidence):\n \"\"\"Lists the containers from an input Evidence.\n\n We use subprocess to run the 
DockerExplorer script, instead of using the\n Python module, because we need to make sure all DockerExplorer code runs\n as root.\n\n Args:\n evidence (Evidence): the input Evidence.\n\n Returns:\n a list(dict) containing information about the containers found.\n\n Raises:\n TurbiniaException: when the docker-explorer tool cannot be found or failed\n to run.\n \"\"\"\n config.LoadConfig()\n docker_dir = GetDockerPath(evidence.mount_path)\n\n containers_info = None\n\n # TODO(rgayon): use docker-explorer exposed constant when\n # https://github.com/google/docker-explorer/issues/80 is in.\n de_binary = utils.get_exe_path('de.py')\n if not de_binary:\n raise TurbiniaException('Cannot find de.py in path')\n\n docker_explorer_command = ['sudo', de_binary]\n\n if config.DEBUG_TASKS or evidence.config.get('debug_tasks'):\n docker_explorer_command.append('-d')\n\n docker_explorer_command.extend(['-r', docker_dir, 'list', 'all_containers'])\n\n log.info('Running {0:s}'.format(' '.join(docker_explorer_command)))\n try:\n json_string = subprocess.check_output(docker_explorer_command).decode(\n 'utf-8')\n except json.JSONDecodeError as e:\n raise TurbiniaException(\n 'Error decoding JSON output from de.py: {0!s}'.format(e))\n except subprocess.CalledProcessError as e:\n raise TurbiniaException('de.py returned an error: {0!s}'.format(e))\n\n containers_info = json.loads(json_string)\n\n return containers_info\n\n def run(self, evidence, result):\n \"\"\"Run the docker-explorer tool to list containerss.\n\n Args:\n evidence (Evidence object): The evidence to process\n result (TurbiniaTaskResult): The object to place task results into.\n\n Returns:\n TurbiniaTaskResult object.\n \"\"\"\n\n status_report = ''\n success = False\n\n status_report = (\n 'Error enumerating Docker containers, evidence has no mounted '\n 'filesystem')\n found_containers = []\n try:\n containers_info = self.GetContainers(evidence)\n for container_info in containers_info:\n container_id = container_info.get('container_id')\n found_containers.append(container_id)\n container_evidence = DockerContainer(container_id=container_id)\n result.add_evidence(container_evidence, evidence.config)\n success = True\n status_report = 'Found {0!s} containers: {1:s}'.format(\n len(found_containers), ' '.join(found_containers))\n except TurbiniaException as e:\n status_report = 'Error enumerating Docker containers: {0!s}'.format(e)\n\n result.report_priority = Priority.LOW\n result.report_data = status_report\n result.close(self, success=success, status=status_report)\n return result\n", "path": "turbinia/workers/docker.py"}]} | 2,188 | 214 |
gh_patches_debug_12726 | rasdani/github-patches | git_diff | enthought__chaco-678 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove chaco.chaco_version file
https://github.com/enthought/chaco/blob/fdd858aa6dbc76addb50d011fb81e879ce8e0355/chaco/chaco_version.py
We now create the `chaco._version` file when installing the package (in `setup.py`), so we don't need this additional `chaco.chaco_version` file anymore.
</issue>
<code>
[start of chaco/chaco_version.py]
1 # ------------------------------------------------------------------------------
2 # Copyright (c) 2005, Enthought, Inc.
3 # All rights reserved.
4 #
5 # This software is provided without warranty under the terms of the BSD
6 # license included in LICENSE.txt and may be redistributed only
7 # under the conditions described in the aforementioned license. The license
8 # is also available online at http://www.enthought.com/licenses/BSD.txt
9 # Thanks for using Enthought open source!
10 #
11 # Author: Enthought, Inc.
12 # Description: <Enthought library component>
13 # ------------------------------------------------------------------------------
14 """ Defines version numbering for the Chaco package.
15 """
16 major = 2
17 minor = 0
18 micro = 9
19
20 version = "%s.%s.%s" % (major, minor, micro)
21
22
23 release_level = "beta"
24 branch = ""
25 revision = version
26
[end of chaco/chaco_version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/chaco/chaco_version.py b/chaco/chaco_version.py
deleted file mode 100644
--- a/chaco/chaco_version.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# ------------------------------------------------------------------------------
-# Copyright (c) 2005, Enthought, Inc.
-# All rights reserved.
-#
-# This software is provided without warranty under the terms of the BSD
-# license included in LICENSE.txt and may be redistributed only
-# under the conditions described in the aforementioned license. The license
-# is also available online at http://www.enthought.com/licenses/BSD.txt
-# Thanks for using Enthought open source!
-#
-# Author: Enthought, Inc.
-# Description: <Enthought library component>
-# ------------------------------------------------------------------------------
-""" Defines version numbering for the Chaco package.
-"""
-major = 2
-minor = 0
-micro = 9
-
-version = "%s.%s.%s" % (major, minor, micro)
-
-
-release_level = "beta"
-branch = ""
-revision = version
| {"golden_diff": "diff --git a/chaco/chaco_version.py b/chaco/chaco_version.py\ndeleted file mode 100644\n--- a/chaco/chaco_version.py\n+++ /dev/null\n@@ -1,25 +0,0 @@\n-# ------------------------------------------------------------------------------\n-# Copyright (c) 2005, Enthought, Inc.\n-# All rights reserved.\n-#\n-# This software is provided without warranty under the terms of the BSD\n-# license included in LICENSE.txt and may be redistributed only\n-# under the conditions described in the aforementioned license. The license\n-# is also available online at http://www.enthought.com/licenses/BSD.txt\n-# Thanks for using Enthought open source!\n-#\n-# Author: Enthought, Inc.\n-# Description: <Enthought library component>\n-# ------------------------------------------------------------------------------\n-\"\"\" Defines version numbering for the Chaco package.\n-\"\"\"\n-major = 2\n-minor = 0\n-micro = 9\n-\n-version = \"%s.%s.%s\" % (major, minor, micro)\n-\n-\n-release_level = \"beta\"\n-branch = \"\"\n-revision = version\n", "issue": "Remove chaco.chaco_version file\nhttps://github.com/enthought/chaco/blob/fdd858aa6dbc76addb50d011fb81e879ce8e0355/chaco/chaco_version.py\r\n\r\nWe now create `chaco._version` file when installing the package (in `setup.py`) so we don't need this additional `chaco.chaco_version` file anymore.\n", "before_files": [{"content": "# ------------------------------------------------------------------------------\n# Copyright (c) 2005, Enthought, Inc.\n# All rights reserved.\n#\n# This software is provided without warranty under the terms of the BSD\n# license included in LICENSE.txt and may be redistributed only\n# under the conditions described in the aforementioned license. The license\n# is also available online at http://www.enthought.com/licenses/BSD.txt\n# Thanks for using Enthought open source!\n#\n# Author: Enthought, Inc.\n# Description: <Enthought library component>\n# ------------------------------------------------------------------------------\n\"\"\" Defines version numbering for the Chaco package.\n\"\"\"\nmajor = 2\nminor = 0\nmicro = 9\n\nversion = \"%s.%s.%s\" % (major, minor, micro)\n\n\nrelease_level = \"beta\"\nbranch = \"\"\nrevision = version\n", "path": "chaco/chaco_version.py"}]} | 846 | 245 |
gh_patches_debug_41891 | rasdani/github-patches | git_diff | streamlink__streamlink-2134 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Huomao plugin does not work
<!--
Thanks for reporting a plugin issue!
USE THE TEMPLATE. Otherwise your plugin issue may be rejected.
First, see the contribution guidelines:
https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink
Also check the list of open and closed plugin issues:
https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22
Please see the text preview to avoid unnecessary formatting errors.
-->
## Plugin Issue
<!-- Replace [ ] with [x] in order to check the box -->
- [x] This is a plugin issue and I have read the contribution guidelines.
### Description
I found that the huomao plugin does not seem to work. I can use a browser to watch the stream, but
streamlink says no playable streams are found.
<!-- Explain the plugin issue as thoroughly as you can. -->
### Reproduction steps / Explicit stream URLs to test
<!-- How can we reproduce this? Please note the exact steps below using the list format supplied. If you need more steps please add them. -->
1. https://www.huomao.com/9755
2. https://www.huomao.com/777777
3. https://www.huomao.com/888
### Log output
<!--
TEXT LOG OUTPUT IS REQUIRED for a plugin issue!
Use the `--loglevel debug` parameter and avoid using parameters which suppress log output.
https://streamlink.github.io/cli.html#cmdoption-l
Make sure to **remove usernames and passwords**
You can copy the output to https://gist.github.com/ or paste it below.
-->
```
[cli][info] Found matching plugin huomao for URL https://www.huomao.com/888
[plugin.huomao][error] Failed to extract stream_info.
error: No playable streams found on this URL: https://www.huomao.com/888
```
</issue>
<code>
[start of src/streamlink/plugins/huomao.py]
1 """
2 NOTE: Since a documented API is nowhere to be found for Huomao; this plugin
3 simply extracts the videos stream_id, stream_url and stream_quality by
4 scraping the HTML and JS of one of Huomaos mobile webpages.
5
6 When viewing a stream on huomao.com, the base URL references a room_id. This
7 room_id is mapped one-to-one to a stream_id which references the actual .flv
8 video. Both stream_id, stream_url and stream_quality can be found in the
9 HTML and JS source of the mobile_page. Since one stream can occur in many
10 different qualities, we scrape all stream_url and stream_quality occurrences
11 and return each option to the user.
12 """
13
14 import re
15
16 from streamlink.plugin import Plugin
17 from streamlink.stream import HTTPStream
18
19 # URL pattern for recognizing inputed Huomao.tv / Huomao.com URL.
20 url_re = re.compile(r"""
21 (http(s)?://)?
22 (www\.)?
23 huomao
24 (\.tv|\.com)
25 /(?P<room_id>\d+)
26 """, re.VERBOSE)
27
28 # URL used to retrive the stream_id, stream_url and stream_quality based of
29 # a room_id.
30 mobile_url = "http://www.huomao.com/mobile/mob_live/{0}"
31
32 # Pattern for extracting the stream_id from the mobile_url HTML.
33 #
34 # Example from HTML:
35 # <input id="html_stream" value="efmrCH" type="hidden">
36 stream_id_pattern = re.compile(r'id=\"html_stream\" value=\"(?P<stream_id>\w+)\"')
37
38 # Pattern for extracting each stream_url, stream_quality_url and a prettified
39 # stream_quality_name used for quality naming.
40 #
41 # Example from HTML:
42 # "2: 'http://live-ws.huomaotv.cn/live/'+stream+'_720/playlist.m3u8'"
43 stream_info_pattern = re.compile(r"""
44 [1-9]:
45 \s+
46 '(?P<stream_url>(?:\w|\.|:|-|/)+)
47 '\+stream\+'
48 (?P<stream_quality_url>_?(?P<stream_quality_name>\d*))
49 /playlist.m3u8'
50 """, re.VERBOSE)
51
52
53 class Huomao(Plugin):
54 @classmethod
55 def can_handle_url(self, url):
56 return url_re.match(url)
57
58 def get_stream_id(self, html):
59 """Returns the stream_id contained in the HTML."""
60 stream_id = stream_id_pattern.search(html)
61
62 if not stream_id:
63 self.logger.error("Failed to extract stream_id.")
64
65 return stream_id.group("stream_id")
66
67 def get_stream_info(self, html):
68 """Returns a nested list of different stream options.
69
70 Each entry in the list will contain a stream_url, stream_quality_url
71 and stream_quality_name for each stream occurrence that was found in
72 the JS.
73 """
74 stream_info = stream_info_pattern.findall(html)
75
76 if not stream_info:
77 self.logger.error("Failed to extract stream_info.")
78
79 # Rename the "" quality to "source" by transforming the tuples to a
80 # list and reassigning.
81 stream_info_list = []
82 for info in stream_info:
83 if not info[2]:
84 stream_info_list.append([info[0], info[1], "source"])
85 else:
86 stream_info_list.append(list(info))
87
88 return stream_info_list
89
90 def _get_streams(self):
91 room_id = url_re.search(self.url).group("room_id")
92 html = self.session.http.get(mobile_url.format(room_id))
93 stream_id = self.get_stream_id(html.text)
94 stream_info = self.get_stream_info(html.text)
95
96 streams = {}
97 for info in stream_info:
98 streams[info[2]] = HTTPStream(self.session,
99 info[0] + stream_id + info[1] + ".flv")
100
101 return streams
102
103
104 __plugin__ = Huomao
105
[end of src/streamlink/plugins/huomao.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/streamlink/plugins/huomao.py b/src/streamlink/plugins/huomao.py
--- a/src/streamlink/plugins/huomao.py
+++ b/src/streamlink/plugins/huomao.py
@@ -4,8 +4,8 @@
scraping the HTML and JS of one of Huomaos mobile webpages.
When viewing a stream on huomao.com, the base URL references a room_id. This
-room_id is mapped one-to-one to a stream_id which references the actual .flv
-video. Both stream_id, stream_url and stream_quality can be found in the
+room_id is mapped one-to-one to a stream_id which references the actual .m3u8
+file. Both stream_id, stream_url and stream_quality can be found in the
HTML and JS source of the mobile_page. Since one stream can occur in many
different qualities, we scrape all stream_url and stream_quality occurrences
and return each option to the user.
@@ -14,7 +14,7 @@
import re
from streamlink.plugin import Plugin
-from streamlink.stream import HTTPStream
+from streamlink.stream import HLSStream
# URL pattern for recognizing inputed Huomao.tv / Huomao.com URL.
url_re = re.compile(r"""
@@ -35,18 +35,15 @@
# <input id="html_stream" value="efmrCH" type="hidden">
stream_id_pattern = re.compile(r'id=\"html_stream\" value=\"(?P<stream_id>\w+)\"')
-# Pattern for extracting each stream_url, stream_quality_url and a prettified
+# Pattern for extracting each stream_url and
# stream_quality_name used for quality naming.
#
# Example from HTML:
-# "2: 'http://live-ws.huomaotv.cn/live/'+stream+'_720/playlist.m3u8'"
+# src="http://live-ws-hls.huomaotv.cn/live/<stream_id>_720/playlist.m3u8"
stream_info_pattern = re.compile(r"""
- [1-9]:
- \s+
- '(?P<stream_url>(?:\w|\.|:|-|/)+)
- '\+stream\+'
- (?P<stream_quality_url>_?(?P<stream_quality_name>\d*))
- /playlist.m3u8'
+ (?P<stream_url>(?:[\w\/\.\-:]+)
+ \/[^_\"]+(?:_(?P<stream_quality_name>\d+))
+ ?/playlist.m3u8)
""", re.VERBOSE)
@@ -65,11 +62,11 @@
return stream_id.group("stream_id")
def get_stream_info(self, html):
- """Returns a nested list of different stream options.
+ """
+ Returns a nested list of different stream options.
- Each entry in the list will contain a stream_url, stream_quality_url
- and stream_quality_name for each stream occurrence that was found in
- the JS.
+ Each entry in the list will contain a stream_url and stream_quality_name
+ for each stream occurrence that was found in the JS.
"""
stream_info = stream_info_pattern.findall(html)
@@ -80,8 +77,8 @@
# list and reassigning.
stream_info_list = []
for info in stream_info:
- if not info[2]:
- stream_info_list.append([info[0], info[1], "source"])
+ if not info[1]:
+ stream_info_list.append([info[0], "source"])
else:
stream_info_list.append(list(info))
@@ -95,8 +92,8 @@
streams = {}
for info in stream_info:
- streams[info[2]] = HTTPStream(self.session,
- info[0] + stream_id + info[1] + ".flv")
+ if stream_id in info[0]:
+ streams[info[1]] = HLSStream(self.session, info[0])
return streams
| {"golden_diff": "diff --git a/src/streamlink/plugins/huomao.py b/src/streamlink/plugins/huomao.py\n--- a/src/streamlink/plugins/huomao.py\n+++ b/src/streamlink/plugins/huomao.py\n@@ -4,8 +4,8 @@\n scraping the HTML and JS of one of Huomaos mobile webpages.\n \n When viewing a stream on huomao.com, the base URL references a room_id. This\n-room_id is mapped one-to-one to a stream_id which references the actual .flv\n-video. Both stream_id, stream_url and stream_quality can be found in the\n+room_id is mapped one-to-one to a stream_id which references the actual .m3u8\n+file. Both stream_id, stream_url and stream_quality can be found in the\n HTML and JS source of the mobile_page. Since one stream can occur in many\n different qualities, we scrape all stream_url and stream_quality occurrences\n and return each option to the user.\n@@ -14,7 +14,7 @@\n import re\n \n from streamlink.plugin import Plugin\n-from streamlink.stream import HTTPStream\n+from streamlink.stream import HLSStream\n \n # URL pattern for recognizing inputed Huomao.tv / Huomao.com URL.\n url_re = re.compile(r\"\"\"\n@@ -35,18 +35,15 @@\n # <input id=\"html_stream\" value=\"efmrCH\" type=\"hidden\">\n stream_id_pattern = re.compile(r'id=\\\"html_stream\\\" value=\\\"(?P<stream_id>\\w+)\\\"')\n \n-# Pattern for extracting each stream_url, stream_quality_url and a prettified\n+# Pattern for extracting each stream_url and\n # stream_quality_name used for quality naming.\n #\n # Example from HTML:\n-# \"2: 'http://live-ws.huomaotv.cn/live/'+stream+'_720/playlist.m3u8'\"\n+# src=\"http://live-ws-hls.huomaotv.cn/live/<stream_id>_720/playlist.m3u8\"\n stream_info_pattern = re.compile(r\"\"\"\n- [1-9]:\n- \\s+\n- '(?P<stream_url>(?:\\w|\\.|:|-|/)+)\n- '\\+stream\\+'\n- (?P<stream_quality_url>_?(?P<stream_quality_name>\\d*))\n- /playlist.m3u8'\n+ (?P<stream_url>(?:[\\w\\/\\.\\-:]+)\n+ \\/[^_\\\"]+(?:_(?P<stream_quality_name>\\d+))\n+ ?/playlist.m3u8)\n \"\"\", re.VERBOSE)\n \n \n@@ -65,11 +62,11 @@\n return stream_id.group(\"stream_id\")\n \n def get_stream_info(self, html):\n- \"\"\"Returns a nested list of different stream options.\n+ \"\"\"\n+ Returns a nested list of different stream options.\n \n- Each entry in the list will contain a stream_url, stream_quality_url\n- and stream_quality_name for each stream occurrence that was found in\n- the JS.\n+ Each entry in the list will contain a stream_url and stream_quality_name\n+ for each stream occurrence that was found in the JS.\n \"\"\"\n stream_info = stream_info_pattern.findall(html)\n \n@@ -80,8 +77,8 @@\n # list and reassigning.\n stream_info_list = []\n for info in stream_info:\n- if not info[2]:\n- stream_info_list.append([info[0], info[1], \"source\"])\n+ if not info[1]:\n+ stream_info_list.append([info[0], \"source\"])\n else:\n stream_info_list.append(list(info))\n \n@@ -95,8 +92,8 @@\n \n streams = {}\n for info in stream_info:\n- streams[info[2]] = HTTPStream(self.session,\n- info[0] + stream_id + info[1] + \".flv\")\n+ if stream_id in info[0]:\n+ streams[info[1]] = HLSStream(self.session, info[0])\n \n return streams\n", "issue": "Huomao plugin not work\n<!--\r\nThanks for reporting a plugin issue!\r\nUSE THE TEMPLATE. 
Otherwise your plugin issue may be rejected.\r\n\r\nFirst, see the contribution guidelines:\r\nhttps://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink\r\n\r\nAlso check the list of open and closed plugin issues:\r\nhttps://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22\r\n\r\nPlease see the text preview to avoid unnecessary formatting errors.\r\n-->\r\n\r\n\r\n## Plugin Issue\r\n\r\n<!-- Replace [ ] with [x] in order to check the box -->\r\n- [x] This is a plugin issue and I have read the contribution guidelines.\r\n\r\n\r\n### Description\r\nI found huomao plugin seems not work, i can use browser to watch stream but\r\nstreamlink says no playable stream\r\n\r\n\r\n<!-- Explain the plugin issue as thoroughly as you can. -->\r\n\r\n\r\n### Reproduction steps / Explicit stream URLs to test\r\n\r\n<!-- How can we reproduce this? Please note the exact steps below using the list format supplied. If you need more steps please add them. -->\r\n\r\n1. https://www.huomao.com/9755\r\n2. https://www.huomao.com/777777\r\n3. https://www.huomao.com/888\r\n\r\n\r\n### Log output\r\n\r\n<!--\r\nTEXT LOG OUTPUT IS REQUIRED for a plugin issue!\r\nUse the `--loglevel debug` parameter and avoid using parameters which suppress log output.\r\nhttps://streamlink.github.io/cli.html#cmdoption-l\r\n\r\nMake sure to **remove usernames and passwords**\r\nYou can copy the output to https://gist.github.com/ or paste it below.\r\n-->\r\n\r\n```\r\n[cli][info] Found matching plugin huomao for URL https://www.huomao.com/888\r\n[plugin.huomao][error] Failed to extract stream_info.\r\nerror: No playable streams found on this URL: https://www.huomao.com/888\r\n```\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nNOTE: Since a documented API is nowhere to be found for Huomao; this plugin\nsimply extracts the videos stream_id, stream_url and stream_quality by\nscraping the HTML and JS of one of Huomaos mobile webpages.\n\nWhen viewing a stream on huomao.com, the base URL references a room_id. This\nroom_id is mapped one-to-one to a stream_id which references the actual .flv\nvideo. Both stream_id, stream_url and stream_quality can be found in the\nHTML and JS source of the mobile_page. 
Since one stream can occur in many\ndifferent qualities, we scrape all stream_url and stream_quality occurrences\nand return each option to the user.\n\"\"\"\n\nimport re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.stream import HTTPStream\n\n# URL pattern for recognizing inputed Huomao.tv / Huomao.com URL.\nurl_re = re.compile(r\"\"\"\n (http(s)?://)?\n (www\\.)?\n huomao\n (\\.tv|\\.com)\n /(?P<room_id>\\d+)\n\"\"\", re.VERBOSE)\n\n# URL used to retrive the stream_id, stream_url and stream_quality based of\n# a room_id.\nmobile_url = \"http://www.huomao.com/mobile/mob_live/{0}\"\n\n# Pattern for extracting the stream_id from the mobile_url HTML.\n#\n# Example from HTML:\n# <input id=\"html_stream\" value=\"efmrCH\" type=\"hidden\">\nstream_id_pattern = re.compile(r'id=\\\"html_stream\\\" value=\\\"(?P<stream_id>\\w+)\\\"')\n\n# Pattern for extracting each stream_url, stream_quality_url and a prettified\n# stream_quality_name used for quality naming.\n#\n# Example from HTML:\n# \"2: 'http://live-ws.huomaotv.cn/live/'+stream+'_720/playlist.m3u8'\"\nstream_info_pattern = re.compile(r\"\"\"\n [1-9]:\n \\s+\n '(?P<stream_url>(?:\\w|\\.|:|-|/)+)\n '\\+stream\\+'\n (?P<stream_quality_url>_?(?P<stream_quality_name>\\d*))\n /playlist.m3u8'\n\"\"\", re.VERBOSE)\n\n\nclass Huomao(Plugin):\n @classmethod\n def can_handle_url(self, url):\n return url_re.match(url)\n\n def get_stream_id(self, html):\n \"\"\"Returns the stream_id contained in the HTML.\"\"\"\n stream_id = stream_id_pattern.search(html)\n\n if not stream_id:\n self.logger.error(\"Failed to extract stream_id.\")\n\n return stream_id.group(\"stream_id\")\n\n def get_stream_info(self, html):\n \"\"\"Returns a nested list of different stream options.\n\n Each entry in the list will contain a stream_url, stream_quality_url\n and stream_quality_name for each stream occurrence that was found in\n the JS.\n \"\"\"\n stream_info = stream_info_pattern.findall(html)\n\n if not stream_info:\n self.logger.error(\"Failed to extract stream_info.\")\n\n # Rename the \"\" quality to \"source\" by transforming the tuples to a\n # list and reassigning.\n stream_info_list = []\n for info in stream_info:\n if not info[2]:\n stream_info_list.append([info[0], info[1], \"source\"])\n else:\n stream_info_list.append(list(info))\n\n return stream_info_list\n\n def _get_streams(self):\n room_id = url_re.search(self.url).group(\"room_id\")\n html = self.session.http.get(mobile_url.format(room_id))\n stream_id = self.get_stream_id(html.text)\n stream_info = self.get_stream_info(html.text)\n\n streams = {}\n for info in stream_info:\n streams[info[2]] = HTTPStream(self.session,\n info[0] + stream_id + info[1] + \".flv\")\n\n return streams\n\n\n__plugin__ = Huomao\n", "path": "src/streamlink/plugins/huomao.py"}]} | 2,021 | 889 |
gh_patches_debug_1500 | rasdani/github-patches | git_diff | lutris__lutris-2885 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Runners list is not updated on a second install/uninstall
**Describe the bug**
The second time you try to install a runner in a Lutris session, the runner list is not updated anymore.
**Current behavior**
The runner list is updated only once per install/uninstall for each runner
**Steps to reproduce**
1. For the sake of this testing, make sure you don't have ZDoom installed.
2. Open Lutris.
3. Open the 'Manage runners' window.
4. Install ZDoom runner.
4.1. As expected, the runner list from the main Lutris window has added the ZDoom entry.
5. Uninstall ZDoom runner.
5.1. As expected, the runner list from the main Lutris window removed the ZDoom entry.
6. Reinstall ZDoom runner.
6.1. The runner list from the main Lutris window hasn't added the ZDoom entry!
</issue>
<code>
[start of lutris/gui/widgets/sidebar.py]
1 """Sidebar for the main window"""
2 # Standard Library
3 import os
4
5 # Third Party Libraries
6 from gi.repository import GObject, Gtk, Pango
7
8 # Lutris Modules
9 from lutris import pga, platforms, runners
10 from lutris.game import Game
11 from lutris.gui.config.runner import RunnerConfigDialog
12 from lutris.gui.dialogs.runner_install import RunnerInstallDialog
13 from lutris.gui.dialogs.runners import RunnersDialog
14 from lutris.util import datapath
15
16 TYPE = 0
17 SLUG = 1
18 ICON = 2
19 LABEL = 3
20 GAMECOUNT = 4
21
22
23 class SidebarRow(Gtk.ListBoxRow):
24
25 def __init__(self, id_, type_, name, icon):
26 super().__init__()
27 self.type = type_
28 self.id = id_
29 self.btn_box = None
30 self.runner = None
31
32 self.box = Gtk.Box(spacing=6, margin_start=9, margin_end=9)
33
34 # Construct the left column icon space.
35 if icon:
36 self.box.add(icon)
37 else:
38 # Place a spacer if there is no loaded icon.
39 icon = Gtk.Box(spacing=6, margin_start=9, margin_end=9)
40 self.box.add(icon)
41
42 label = Gtk.Label(
43 label=name,
44 halign=Gtk.Align.START,
45 hexpand=True,
46 margin_top=6,
47 margin_bottom=6,
48 ellipsize=Pango.EllipsizeMode.END,
49 )
50 self.box.add(label)
51
52 self.add(self.box)
53
54 def _create_button_box(self):
55 self.btn_box = Gtk.Box(spacing=3, no_show_all=True, valign=Gtk.Align.CENTER, homogeneous=True)
56
57 # Creation is delayed because only installed runners can be imported
58 # and all visible boxes should be installed.
59 self.runner = runners.import_runner(self.id)()
60 entries = []
61 if self.runner.multiple_versions:
62 entries.append((
63 "system-software-install-symbolic",
64 "Manage Versions",
65 self.on_manage_versions,
66 ))
67 if self.runner.runnable_alone:
68 entries.append(("media-playback-start-symbolic", "Run", self.runner.run))
69 entries.append(("emblem-system-symbolic", "Configure", self.on_configure_runner))
70 for entry in entries:
71 btn = Gtk.Button(tooltip_text=entry[1], relief=Gtk.ReliefStyle.NONE, visible=True)
72 image = Gtk.Image.new_from_icon_name(entry[0], Gtk.IconSize.MENU)
73 image.show()
74 btn.add(image)
75 btn.connect("clicked", entry[2])
76 self.btn_box.add(btn)
77
78 self.box.add(self.btn_box)
79
80 def on_configure_runner(self, *args): # pylint: disable=unused-argument
81 RunnerConfigDialog(self.runner, parent=self.get_toplevel())
82
83 def on_manage_versions(self, *args): # pylint: disable=unused-argument
84 dlg_title = "Manage %s versions" % self.runner.name
85 RunnerInstallDialog(dlg_title, self.get_toplevel(), self.runner.name)
86
87 def do_state_flags_changed(self, previous_flags): # pylint: disable=arguments-differ
88 if self.id is not None and self.type == "runner":
89 flags = self.get_state_flags()
90 if flags & Gtk.StateFlags.PRELIGHT or flags & Gtk.StateFlags.SELECTED:
91 if self.btn_box is None:
92 self._create_button_box()
93 self.btn_box.show()
94 elif self.btn_box is not None and self.btn_box.get_visible():
95 self.btn_box.hide()
96 Gtk.ListBoxRow.do_state_flags_changed(self, previous_flags)
97
98
99 class SidebarHeader(Gtk.Box):
100
101 def __init__(self, name):
102 super().__init__(orientation=Gtk.Orientation.VERTICAL)
103 self.get_style_context().add_class("sidebar-header")
104 label = Gtk.Label(
105 halign=Gtk.Align.START,
106 hexpand=True,
107 use_markup=True,
108 label="<b>{}</b>".format(name),
109 )
110 label.get_style_context().add_class("dim-label")
111 box = Gtk.Box(margin_start=9, margin_top=6, margin_bottom=6, margin_right=9)
112 box.add(label)
113 self.add(box)
114 if name == "Runners":
115 manage_runners_button = Gtk.Button.new_from_icon_name("emblem-system-symbolic", Gtk.IconSize.MENU)
116 manage_runners_button.props.action_name = "win.manage-runners"
117 manage_runners_button.props.relief = Gtk.ReliefStyle.NONE
118 manage_runners_button.set_margin_right(16)
119 manage_runners_button.get_style_context().add_class("sidebar-button")
120 box.add(manage_runners_button)
121 self.add(Gtk.Separator())
122 self.show_all()
123
124
125 class SidebarListBox(Gtk.ListBox):
126 __gtype_name__ = "LutrisSidebar"
127
128 def __init__(self):
129 super().__init__()
130 self.get_style_context().add_class("sidebar")
131 self.installed_runners = []
132 self.active_platforms = pga.get_used_platforms()
133 self.runners = sorted(runners.__all__)
134 self.platforms = sorted(platforms.__all__)
135
136 GObject.add_emission_hook(RunnersDialog, "runner-installed", self.update)
137 GObject.add_emission_hook(RunnersDialog, "runner-removed", self.update)
138 GObject.add_emission_hook(Game, "game-updated", self.update)
139 GObject.add_emission_hook(Game, "game-removed", self.update)
140
141 # TODO: This should be in a more logical location
142 icon_theme = Gtk.IconTheme.get_default()
143 local_theme_path = os.path.join(datapath.get(), "icons")
144 if local_theme_path not in icon_theme.get_search_path():
145 icon_theme.prepend_search_path(local_theme_path)
146
147 all_row = SidebarRow(None, "runner", "All", None)
148 self.add(all_row)
149 self.select_row(all_row)
150 for runner in self.runners:
151 icon_name = runner.lower().replace(" ", "") + "-symbolic"
152 icon = Gtk.Image.new_from_icon_name(icon_name, Gtk.IconSize.MENU)
153 name = runners.import_runner(runner).human_name
154 self.add(SidebarRow(runner, "runner", name, icon))
155
156 self.add(SidebarRow(None, "platform", "All", None))
157 for platform in self.platforms:
158 icon_name = (platform.lower().replace(" ", "").replace("/", "_") + "-symbolic")
159 icon = Gtk.Image.new_from_icon_name(icon_name, Gtk.IconSize.MENU)
160 self.add(SidebarRow(platform, "platform", platform, icon))
161
162 self.set_filter_func(self._filter_func)
163 self.set_header_func(self._header_func)
164 self.update()
165 self.show_all()
166
167 def _filter_func(self, row):
168 if row is None:
169 return True
170 if row.type == "runner":
171 if row.id is None:
172 return True # 'All'
173 return row.id in self.installed_runners
174 if len(self.active_platforms) <= 1:
175 return False # Hide useless filter
176 if row.id is None: # 'All'
177 return True
178 return row.id in self.active_platforms
179
180 def _header_func(self, row, before):
181 if row.get_header():
182 return
183
184 if not before:
185 row.set_header(SidebarHeader("Runners"))
186 elif before.type == "runner" and row.type == "platform":
187 row.set_header(SidebarHeader("Platforms"))
188
189 def update(self, *args): # pylint: disable=unused-argument
190 self.installed_runners = [runner.name for runner in runners.get_installed()]
191 self.active_platforms = pga.get_used_platforms()
192 self.invalidate_filter()
193
[end of lutris/gui/widgets/sidebar.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lutris/gui/widgets/sidebar.py b/lutris/gui/widgets/sidebar.py
--- a/lutris/gui/widgets/sidebar.py
+++ b/lutris/gui/widgets/sidebar.py
@@ -190,3 +190,4 @@
self.installed_runners = [runner.name for runner in runners.get_installed()]
self.active_platforms = pga.get_used_platforms()
self.invalidate_filter()
+ return True
| {"golden_diff": "diff --git a/lutris/gui/widgets/sidebar.py b/lutris/gui/widgets/sidebar.py\n--- a/lutris/gui/widgets/sidebar.py\n+++ b/lutris/gui/widgets/sidebar.py\n@@ -190,3 +190,4 @@\n self.installed_runners = [runner.name for runner in runners.get_installed()]\n self.active_platforms = pga.get_used_platforms()\n self.invalidate_filter()\n+ return True\n", "issue": "Runners list is not updated on a second install/uninstall\n**Describe the bug**\r\nThe second time you are trying to install a runner on a Lutris session, the runner list won't be updated anymore.\r\n\r\n**Current behavior**\r\nThe runner list is updated only once per install/uninstall for each runner\r\n\r\n**Steps to reproduce**\r\n1. For the sake of this testing, make sure you don't have ZDoom installed.\r\n2. Open Lutris.\r\n3. Open the 'Manage runners' window.\r\n4. Install ZDoom runner.\r\n4.1. As expected, the runner list from the main Lutris window has added the ZDoom entry.\r\n5. Uninstall ZDoom runner.\r\n5.1. As expected, the runner list from the main Lutris window removed the ZDoom entry.\r\n6. Reinstall ZDoom runner.\r\n6.1. The runner list from the main Lutris window haven't added the ZDoom entry!\n", "before_files": [{"content": "\"\"\"Sidebar for the main window\"\"\"\n# Standard Library\nimport os\n\n# Third Party Libraries\nfrom gi.repository import GObject, Gtk, Pango\n\n# Lutris Modules\nfrom lutris import pga, platforms, runners\nfrom lutris.game import Game\nfrom lutris.gui.config.runner import RunnerConfigDialog\nfrom lutris.gui.dialogs.runner_install import RunnerInstallDialog\nfrom lutris.gui.dialogs.runners import RunnersDialog\nfrom lutris.util import datapath\n\nTYPE = 0\nSLUG = 1\nICON = 2\nLABEL = 3\nGAMECOUNT = 4\n\n\nclass SidebarRow(Gtk.ListBoxRow):\n\n def __init__(self, id_, type_, name, icon):\n super().__init__()\n self.type = type_\n self.id = id_\n self.btn_box = None\n self.runner = None\n\n self.box = Gtk.Box(spacing=6, margin_start=9, margin_end=9)\n\n # Construct the left column icon space.\n if icon:\n self.box.add(icon)\n else:\n # Place a spacer if there is no loaded icon.\n icon = Gtk.Box(spacing=6, margin_start=9, margin_end=9)\n self.box.add(icon)\n\n label = Gtk.Label(\n label=name,\n halign=Gtk.Align.START,\n hexpand=True,\n margin_top=6,\n margin_bottom=6,\n ellipsize=Pango.EllipsizeMode.END,\n )\n self.box.add(label)\n\n self.add(self.box)\n\n def _create_button_box(self):\n self.btn_box = Gtk.Box(spacing=3, no_show_all=True, valign=Gtk.Align.CENTER, homogeneous=True)\n\n # Creation is delayed because only installed runners can be imported\n # and all visible boxes should be installed.\n self.runner = runners.import_runner(self.id)()\n entries = []\n if self.runner.multiple_versions:\n entries.append((\n \"system-software-install-symbolic\",\n \"Manage Versions\",\n self.on_manage_versions,\n ))\n if self.runner.runnable_alone:\n entries.append((\"media-playback-start-symbolic\", \"Run\", self.runner.run))\n entries.append((\"emblem-system-symbolic\", \"Configure\", self.on_configure_runner))\n for entry in entries:\n btn = Gtk.Button(tooltip_text=entry[1], relief=Gtk.ReliefStyle.NONE, visible=True)\n image = Gtk.Image.new_from_icon_name(entry[0], Gtk.IconSize.MENU)\n image.show()\n btn.add(image)\n btn.connect(\"clicked\", entry[2])\n self.btn_box.add(btn)\n\n self.box.add(self.btn_box)\n\n def on_configure_runner(self, *args): # pylint: disable=unused-argument\n RunnerConfigDialog(self.runner, parent=self.get_toplevel())\n\n def on_manage_versions(self, *args): # 
pylint: disable=unused-argument\n dlg_title = \"Manage %s versions\" % self.runner.name\n RunnerInstallDialog(dlg_title, self.get_toplevel(), self.runner.name)\n\n def do_state_flags_changed(self, previous_flags): # pylint: disable=arguments-differ\n if self.id is not None and self.type == \"runner\":\n flags = self.get_state_flags()\n if flags & Gtk.StateFlags.PRELIGHT or flags & Gtk.StateFlags.SELECTED:\n if self.btn_box is None:\n self._create_button_box()\n self.btn_box.show()\n elif self.btn_box is not None and self.btn_box.get_visible():\n self.btn_box.hide()\n Gtk.ListBoxRow.do_state_flags_changed(self, previous_flags)\n\n\nclass SidebarHeader(Gtk.Box):\n\n def __init__(self, name):\n super().__init__(orientation=Gtk.Orientation.VERTICAL)\n self.get_style_context().add_class(\"sidebar-header\")\n label = Gtk.Label(\n halign=Gtk.Align.START,\n hexpand=True,\n use_markup=True,\n label=\"<b>{}</b>\".format(name),\n )\n label.get_style_context().add_class(\"dim-label\")\n box = Gtk.Box(margin_start=9, margin_top=6, margin_bottom=6, margin_right=9)\n box.add(label)\n self.add(box)\n if name == \"Runners\":\n manage_runners_button = Gtk.Button.new_from_icon_name(\"emblem-system-symbolic\", Gtk.IconSize.MENU)\n manage_runners_button.props.action_name = \"win.manage-runners\"\n manage_runners_button.props.relief = Gtk.ReliefStyle.NONE\n manage_runners_button.set_margin_right(16)\n manage_runners_button.get_style_context().add_class(\"sidebar-button\")\n box.add(manage_runners_button)\n self.add(Gtk.Separator())\n self.show_all()\n\n\nclass SidebarListBox(Gtk.ListBox):\n __gtype_name__ = \"LutrisSidebar\"\n\n def __init__(self):\n super().__init__()\n self.get_style_context().add_class(\"sidebar\")\n self.installed_runners = []\n self.active_platforms = pga.get_used_platforms()\n self.runners = sorted(runners.__all__)\n self.platforms = sorted(platforms.__all__)\n\n GObject.add_emission_hook(RunnersDialog, \"runner-installed\", self.update)\n GObject.add_emission_hook(RunnersDialog, \"runner-removed\", self.update)\n GObject.add_emission_hook(Game, \"game-updated\", self.update)\n GObject.add_emission_hook(Game, \"game-removed\", self.update)\n\n # TODO: This should be in a more logical location\n icon_theme = Gtk.IconTheme.get_default()\n local_theme_path = os.path.join(datapath.get(), \"icons\")\n if local_theme_path not in icon_theme.get_search_path():\n icon_theme.prepend_search_path(local_theme_path)\n\n all_row = SidebarRow(None, \"runner\", \"All\", None)\n self.add(all_row)\n self.select_row(all_row)\n for runner in self.runners:\n icon_name = runner.lower().replace(\" \", \"\") + \"-symbolic\"\n icon = Gtk.Image.new_from_icon_name(icon_name, Gtk.IconSize.MENU)\n name = runners.import_runner(runner).human_name\n self.add(SidebarRow(runner, \"runner\", name, icon))\n\n self.add(SidebarRow(None, \"platform\", \"All\", None))\n for platform in self.platforms:\n icon_name = (platform.lower().replace(\" \", \"\").replace(\"/\", \"_\") + \"-symbolic\")\n icon = Gtk.Image.new_from_icon_name(icon_name, Gtk.IconSize.MENU)\n self.add(SidebarRow(platform, \"platform\", platform, icon))\n\n self.set_filter_func(self._filter_func)\n self.set_header_func(self._header_func)\n self.update()\n self.show_all()\n\n def _filter_func(self, row):\n if row is None:\n return True\n if row.type == \"runner\":\n if row.id is None:\n return True # 'All'\n return row.id in self.installed_runners\n if len(self.active_platforms) <= 1:\n return False # Hide useless filter\n if row.id is None: # 'All'\n return 
True\n return row.id in self.active_platforms\n\n def _header_func(self, row, before):\n if row.get_header():\n return\n\n if not before:\n row.set_header(SidebarHeader(\"Runners\"))\n elif before.type == \"runner\" and row.type == \"platform\":\n row.set_header(SidebarHeader(\"Platforms\"))\n\n def update(self, *args): # pylint: disable=unused-argument\n self.installed_runners = [runner.name for runner in runners.get_installed()]\n self.active_platforms = pga.get_used_platforms()\n self.invalidate_filter()\n", "path": "lutris/gui/widgets/sidebar.py"}]} | 2,867 | 94 |
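The golden diff above is a one-line fix: the sidebar's `update()` callback now returns `True`. GObject emission hooks are detached once the callback returns a falsy value, which is why the sidebar refreshed only once per runner. A minimal PyGObject sketch of that behaviour, assuming PyGObject is available; the signal and class names are illustrative rather than Lutris code:

```python
from gi.repository import GObject


class RunnerEvents(GObject.Object):
    # Illustrative signal; Lutris defines similar ones on its dialogs.
    __gsignals__ = {
        "runner-installed": (GObject.SignalFlags.RUN_FIRST, None, ()),
    }


def refresh_sidebar(*args):
    print("refreshing runner list")
    return True  # returning True keeps the emission hook installed for future emissions


GObject.add_emission_hook(RunnerEvents, "runner-installed", refresh_sidebar)

events = RunnerEvents()
events.emit("runner-installed")  # prints
events.emit("runner-installed")  # prints again only because the hook returned True
```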
gh_patches_debug_9747 | rasdani/github-patches | git_diff | getredash__redash-543 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Failed to run mongodb query
When I try to run this MongoDB query on both a local install and the Redash demo, an error is raised: "Error running query: string indices must be integers".
Below is the query. I think this is an error in the Python code, since I can run other simple queries just fine.
``` json
{
"collection": "Email",
"aggregate": [
{
"$group": {
"_id": {
"$dateToString": {
"format": "%Y-%m-%d",
"date": "$sendTime"
}
},
"sent": {
"$sum": {
"$cond": {
"if": {
"$gte": [
"$sent",
1
]
},
"then": 1,
"else": 0
}
}
},
"opened": {
"$sum": {
"$cond": {
"if": {
"$gte": [
"$opened",
1
]
},
"then": 1,
"else": 0
}
}
},
"clicked": {
"$sum": {
"$cond": {
"if": {
"$gte": [
"$clicked",
1
]
},
"then": 1,
"else": 0
}
}
}
}
},
{
"$limit": 10
}
]
}
```
</issue>
<code>
[start of redash/query_runner/mongodb.py]
1 import json
2 import datetime
3 import logging
4 import re
5 import time
6 from dateutil.parser import parse
7
8 from redash.utils import JSONEncoder
9 from redash.query_runner import *
10
11 logger = logging.getLogger(__name__)
12
13 try:
14 import pymongo
15 from bson.objectid import ObjectId
16 from bson.son import SON
17 enabled = True
18
19 except ImportError:
20 logger.warning("Missing dependencies. Please install pymongo.")
21 logger.warning("You can use pip: pip install pymongo")
22 enabled = False
23
24
25 TYPES_MAP = {
26 str: TYPE_STRING,
27 unicode: TYPE_STRING,
28 int: TYPE_INTEGER,
29 long: TYPE_INTEGER,
30 float: TYPE_FLOAT,
31 bool: TYPE_BOOLEAN,
32 datetime.datetime: TYPE_DATETIME,
33 }
34
35 date_regex = re.compile("ISODate\(\"(.*)\"\)", re.IGNORECASE)
36
37 class MongoDBJSONEncoder(JSONEncoder):
38 def default(self, o):
39 if isinstance(o, ObjectId):
40 return str(o)
41
42 return super(MongoDBJSONEncoder, self).default(o)
43
44 # Simple query example:
45 #
46 # {
47 # "collection" : "my_collection",
48 # "query" : {
49 # "date" : {
50 # "$gt" : "ISODate(\"2015-01-15 11:41\")",
51 # },
52 # "type" : 1
53 # },
54 # "fields" : {
55 # "_id" : 1,
56 # "name" : 2
57 # },
58 # "sort" : [
59 # {
60 # "name" : "date",
61 # "direction" : -1
62 # }
63 # ]
64 #
65 # }
66 #
67 #
68 # Aggregation
69 # ===========
70 # Uses a syntax similar to the one used in PyMongo, however to support the
71 # correct order of sorting, it uses a regular list for the "$sort" operation
72 # that converts into a SON (sorted dictionary) object before execution.
73 #
74 # Aggregation query example:
75 #
76 # {
77 # "collection" : "things",
78 # "aggregate" : [
79 # {
80 # "$unwind" : "$tags"
81 # },
82 # {
83 # "$group" : {
84 # "_id" : "$tags",
85 # "count" : { "$sum" : 1 }
86 # }
87 # },
88 # {
89 # "$sort" : [
90 # {
91 # "name" : "count",
92 # "direction" : -1
93 # },
94 # {
95 # "name" : "_id",
96 # "direction" : -1
97 # }
98 # ]
99 # }
100 # ]
101 # }
102 #
103 #
104 class MongoDB(BaseQueryRunner):
105 @classmethod
106 def configuration_schema(cls):
107 return {
108 'type': 'object',
109 'properties': {
110 'connectionString': {
111 'type': 'string',
112 'title': 'Connection String'
113 },
114 'dbName': {
115 'type': 'string',
116 'title': "Database Name"
117 },
118 'replicaSetName': {
119 'type': 'string',
120 'title': 'Replica Set Name'
121 },
122 },
123 'required': ['connectionString']
124 }
125
126 @classmethod
127 def enabled(cls):
128 return enabled
129
130 @classmethod
131 def annotate_query(cls):
132 return False
133
134 def __init__(self, configuration_json):
135 super(MongoDB, self).__init__(configuration_json)
136
137 self.syntax = 'json'
138
139 self.db_name = self.configuration["dbName"]
140
141 self.is_replica_set = True if "replicaSetName" in self.configuration and self.configuration["replicaSetName"] else False
142
143 def _get_column_by_name(self, columns, column_name):
144 for c in columns:
145 if "name" in c and c["name"] == column_name:
146 return c
147
148 return None
149
150 def _fix_dates(self, data):
151 for k in data:
152 if isinstance(data[k], list):
153 for i in range(0, len(data[k])):
154 self._fix_dates(data[k][i])
155 elif isinstance(data[k], dict):
156 self._fix_dates(data[k])
157 else:
158 if isinstance(data[k], (str, unicode)):
159 self._convert_date(data, k)
160
161 def _convert_date(self, q, field_name):
162 m = date_regex.findall(q[field_name])
163 if len(m) > 0:
164 q[field_name] = parse(m[0], yearfirst=True)
165
166 def run_query(self, query):
167 if self.is_replica_set:
168 db_connection = pymongo.MongoReplicaSetClient(self.configuration["connectionString"], replicaSet=self.configuration["replicaSetName"])
169 else:
170 db_connection = pymongo.MongoClient(self.configuration["connectionString"])
171
172 db = db_connection[self.db_name]
173
174 logger.debug("mongodb connection string: %s", self.configuration['connectionString'])
175 logger.debug("mongodb got query: %s", query)
176
177 try:
178 query_data = json.loads(query)
179 self._fix_dates(query_data)
180 except ValueError:
181 return None, "Invalid query format. The query is not a valid JSON."
182
183 if "collection" not in query_data:
184 return None, "'collection' must have a value to run a query"
185 else:
186 collection = query_data["collection"]
187
188 q = query_data.get("query", None)
189 f = None
190
191 aggregate = query_data.get("aggregate", None)
192 if aggregate:
193 for step in aggregate:
194 if "$sort" in step:
195 sort_list = []
196 for sort_item in step["$sort"]:
197 sort_list.append((sort_item["name"], sort_item["direction"]))
198
199 step["$sort"] = SON(sort_list)
200
201 if not aggregate:
202 s = None
203 if "sort" in query_data and query_data["sort"]:
204 s = []
205 for field in query_data["sort"]:
206 s.append((field["name"], field["direction"]))
207
208 if "fields" in query_data:
209 f = query_data["fields"]
210
211 s = None
212 if "sort" in query_data and query_data["sort"]:
213 s = []
214 for field_data in query_data["sort"]:
215 s.append((field_data["name"], field_data["direction"]))
216
217 columns = []
218 rows = []
219
220 cursor = None
221 if q or (not q and not aggregate):
222 if s:
223 cursor = db[collection].find(q, f).sort(s)
224 else:
225 cursor = db[collection].find(q, f)
226
227 if "skip" in query_data:
228 cursor = cursor.skip(query_data["skip"])
229
230 if "limit" in query_data:
231 cursor = cursor.limit(query_data["limit"])
232
233 elif aggregate:
234 r = db[collection].aggregate(aggregate)
235
236 # Backwards compatibility with older pymongo versions.
237 #
238 # Older pymongo version would return a dictionary from an aggregate command.
239 # The dict would contain a "result" key which would hold the cursor.
240 # Newer ones return pymongo.command_cursor.CommandCursor.
241 if isinstance(r, dict):
242 cursor = r["result"]
243 else:
244 cursor = r
245
246 for r in cursor:
247 for k in r:
248 if self._get_column_by_name(columns, k) is None:
249 columns.append({
250 "name": k,
251 "friendly_name": k,
252 "type": TYPES_MAP.get(type(r[k]), TYPE_STRING)
253 })
254
255 rows.append(r)
256
257 if f:
258 ordered_columns = []
259 for k in sorted(f, key=f.get):
260 ordered_columns.append(self._get_column_by_name(columns, k))
261
262 columns = ordered_columns
263
264 data = {
265 "columns": columns,
266 "rows": rows
267 }
268 error = None
269 json_data = json.dumps(data, cls=MongoDBJSONEncoder)
270
271 return json_data, error
272
273 register(MongoDB)
274
[end of redash/query_runner/mongodb.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/redash/query_runner/mongodb.py b/redash/query_runner/mongodb.py
--- a/redash/query_runner/mongodb.py
+++ b/redash/query_runner/mongodb.py
@@ -151,7 +151,11 @@
for k in data:
if isinstance(data[k], list):
for i in range(0, len(data[k])):
- self._fix_dates(data[k][i])
+ if isinstance(data[k][i], (str, unicode)):
+ self._convert_date(data[k], i)
+ elif not isinstance(data[k][i], (int)):
+ self._fix_dates(data[k][i])
+
elif isinstance(data[k], dict):
self._fix_dates(data[k])
else:
| {"golden_diff": "diff --git a/redash/query_runner/mongodb.py b/redash/query_runner/mongodb.py\n--- a/redash/query_runner/mongodb.py\n+++ b/redash/query_runner/mongodb.py\n@@ -151,7 +151,11 @@\n for k in data:\n if isinstance(data[k], list):\n for i in range(0, len(data[k])):\n- self._fix_dates(data[k][i])\n+ if isinstance(data[k][i], (str, unicode)):\n+ self._convert_date(data[k], i)\n+ elif not isinstance(data[k][i], (int)):\n+ self._fix_dates(data[k][i])\n+\n elif isinstance(data[k], dict):\n self._fix_dates(data[k])\n else:\n", "issue": "Failed to run mongodb query\nWhen I try to run this mongodb on both local install or redash demo, a error raised \"Error running query: string indices must be integers\"\n\nBelow is the query, I think this is an error of python code since I can run other simple query just fine.\n\n``` json\n{\n \"collection\": \"Email\",\n \"aggregate\": [\n {\n \"$group\": {\n \"_id\": {\n \"$dateToString\": {\n \"format\": \"%Y-%m-%d\",\n \"date\": \"$sendTime\"\n }\n },\n \"sent\": {\n \"$sum\": {\n \"$cond\": {\n \"if\": {\n \"$gte\": [\n \"$sent\",\n 1\n ]\n },\n \"then\": 1,\n \"else\": 0\n }\n }\n },\n \"opened\": {\n \"$sum\": {\n \"$cond\": {\n \"if\": {\n \"$gte\": [\n \"$opened\",\n 1\n ]\n },\n \"then\": 1,\n \"else\": 0\n }\n }\n },\n \"clicked\": {\n \"$sum\": {\n \"$cond\": {\n \"if\": {\n \"$gte\": [\n \"$clicked\",\n 1\n ]\n },\n \"then\": 1,\n \"else\": 0\n }\n }\n }\n }\n },\n {\n \"$limit\": 10\n }\n ]\n}\n```\n\n", "before_files": [{"content": "import json\nimport datetime\nimport logging\nimport re\nimport time\nfrom dateutil.parser import parse\n\nfrom redash.utils import JSONEncoder\nfrom redash.query_runner import *\n\nlogger = logging.getLogger(__name__)\n\ntry:\n import pymongo\n from bson.objectid import ObjectId\n from bson.son import SON\n enabled = True\n\nexcept ImportError:\n logger.warning(\"Missing dependencies. 
Please install pymongo.\")\n logger.warning(\"You can use pip: pip install pymongo\")\n enabled = False\n\n\nTYPES_MAP = {\n str: TYPE_STRING,\n unicode: TYPE_STRING,\n int: TYPE_INTEGER,\n long: TYPE_INTEGER,\n float: TYPE_FLOAT,\n bool: TYPE_BOOLEAN,\n datetime.datetime: TYPE_DATETIME,\n}\n\ndate_regex = re.compile(\"ISODate\\(\\\"(.*)\\\"\\)\", re.IGNORECASE)\n\nclass MongoDBJSONEncoder(JSONEncoder):\n def default(self, o):\n if isinstance(o, ObjectId):\n return str(o)\n\n return super(MongoDBJSONEncoder, self).default(o)\n\n# Simple query example:\n#\n# {\n# \"collection\" : \"my_collection\",\n# \"query\" : {\n# \"date\" : {\n# \"$gt\" : \"ISODate(\\\"2015-01-15 11:41\\\")\",\n# },\n# \"type\" : 1\n# },\n# \"fields\" : {\n# \"_id\" : 1,\n# \"name\" : 2\n# },\n# \"sort\" : [\n# {\n# \"name\" : \"date\",\n# \"direction\" : -1\n# }\n# ]\n#\n# }\n#\n#\n# Aggregation\n# ===========\n# Uses a syntax similar to the one used in PyMongo, however to support the\n# correct order of sorting, it uses a regular list for the \"$sort\" operation\n# that converts into a SON (sorted dictionary) object before execution.\n#\n# Aggregation query example:\n#\n# {\n# \"collection\" : \"things\",\n# \"aggregate\" : [\n# {\n# \"$unwind\" : \"$tags\"\n# },\n# {\n# \"$group\" : {\n# \"_id\" : \"$tags\",\n# \"count\" : { \"$sum\" : 1 }\n# }\n# },\n# {\n# \"$sort\" : [\n# {\n# \"name\" : \"count\",\n# \"direction\" : -1\n# },\n# {\n# \"name\" : \"_id\",\n# \"direction\" : -1\n# }\n# ]\n# }\n# ]\n# }\n#\n#\nclass MongoDB(BaseQueryRunner):\n @classmethod\n def configuration_schema(cls):\n return {\n 'type': 'object',\n 'properties': {\n 'connectionString': {\n 'type': 'string',\n 'title': 'Connection String'\n },\n 'dbName': {\n 'type': 'string',\n 'title': \"Database Name\"\n },\n 'replicaSetName': {\n 'type': 'string',\n 'title': 'Replica Set Name'\n },\n },\n 'required': ['connectionString']\n }\n\n @classmethod\n def enabled(cls):\n return enabled\n\n @classmethod\n def annotate_query(cls):\n return False\n\n def __init__(self, configuration_json):\n super(MongoDB, self).__init__(configuration_json)\n\n self.syntax = 'json'\n\n self.db_name = self.configuration[\"dbName\"]\n\n self.is_replica_set = True if \"replicaSetName\" in self.configuration and self.configuration[\"replicaSetName\"] else False\n\n def _get_column_by_name(self, columns, column_name):\n for c in columns:\n if \"name\" in c and c[\"name\"] == column_name:\n return c\n\n return None\n\n def _fix_dates(self, data):\n for k in data:\n if isinstance(data[k], list):\n for i in range(0, len(data[k])):\n self._fix_dates(data[k][i])\n elif isinstance(data[k], dict):\n self._fix_dates(data[k])\n else:\n if isinstance(data[k], (str, unicode)):\n self._convert_date(data, k)\n\n def _convert_date(self, q, field_name):\n m = date_regex.findall(q[field_name])\n if len(m) > 0:\n q[field_name] = parse(m[0], yearfirst=True)\n\n def run_query(self, query):\n if self.is_replica_set:\n db_connection = pymongo.MongoReplicaSetClient(self.configuration[\"connectionString\"], replicaSet=self.configuration[\"replicaSetName\"])\n else:\n db_connection = pymongo.MongoClient(self.configuration[\"connectionString\"])\n\n db = db_connection[self.db_name]\n\n logger.debug(\"mongodb connection string: %s\", self.configuration['connectionString'])\n logger.debug(\"mongodb got query: %s\", query)\n\n try:\n query_data = json.loads(query)\n self._fix_dates(query_data)\n except ValueError:\n return None, \"Invalid query format. 
The query is not a valid JSON.\"\n\n if \"collection\" not in query_data:\n return None, \"'collection' must have a value to run a query\"\n else:\n collection = query_data[\"collection\"]\n\n q = query_data.get(\"query\", None)\n f = None\n\n aggregate = query_data.get(\"aggregate\", None)\n if aggregate:\n for step in aggregate:\n if \"$sort\" in step:\n sort_list = []\n for sort_item in step[\"$sort\"]:\n sort_list.append((sort_item[\"name\"], sort_item[\"direction\"]))\n\n step[\"$sort\"] = SON(sort_list)\n\n if not aggregate:\n s = None\n if \"sort\" in query_data and query_data[\"sort\"]:\n s = []\n for field in query_data[\"sort\"]:\n s.append((field[\"name\"], field[\"direction\"]))\n\n if \"fields\" in query_data:\n f = query_data[\"fields\"]\n\n s = None\n if \"sort\" in query_data and query_data[\"sort\"]:\n s = []\n for field_data in query_data[\"sort\"]:\n s.append((field_data[\"name\"], field_data[\"direction\"]))\n\n columns = []\n rows = []\n\n cursor = None\n if q or (not q and not aggregate):\n if s:\n cursor = db[collection].find(q, f).sort(s)\n else:\n cursor = db[collection].find(q, f)\n\n if \"skip\" in query_data:\n cursor = cursor.skip(query_data[\"skip\"])\n\n if \"limit\" in query_data:\n cursor = cursor.limit(query_data[\"limit\"])\n\n elif aggregate:\n r = db[collection].aggregate(aggregate)\n\n # Backwards compatibility with older pymongo versions.\n #\n # Older pymongo version would return a dictionary from an aggregate command.\n # The dict would contain a \"result\" key which would hold the cursor.\n # Newer ones return pymongo.command_cursor.CommandCursor.\n if isinstance(r, dict):\n cursor = r[\"result\"]\n else:\n cursor = r\n\n for r in cursor:\n for k in r:\n if self._get_column_by_name(columns, k) is None:\n columns.append({\n \"name\": k,\n \"friendly_name\": k,\n \"type\": TYPES_MAP.get(type(r[k]), TYPE_STRING)\n })\n\n rows.append(r)\n\n if f:\n ordered_columns = []\n for k in sorted(f, key=f.get):\n ordered_columns.append(self._get_column_by_name(columns, k))\n\n columns = ordered_columns\n\n data = {\n \"columns\": columns,\n \"rows\": rows\n }\n error = None\n json_data = json.dumps(data, cls=MongoDBJSONEncoder)\n\n return json_data, error\n\nregister(MongoDB)\n", "path": "redash/query_runner/mongodb.py"}]} | 3,263 | 161 |
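The golden diff above teaches `_fix_dates` to treat list elements that are strings as values to convert in place (via their index) and to skip ints, instead of recursing into them, which is what triggered "string indices must be integers". A standalone Python 3 sketch of the intended recursion, assuming `python-dateutil` is installed; it mirrors the idea rather than the exact Redash code:

```python
import re
from dateutil.parser import parse

DATE_RE = re.compile(r'ISODate\("(.*)"\)', re.IGNORECASE)


def convert_value(value):
    """Parse ISODate("...") strings into datetimes; leave everything else alone."""
    if isinstance(value, str):
        found = DATE_RE.findall(value)
        if found:
            return parse(found[0], yearfirst=True)
    return value


def fix_dates(node):
    """Recursively rewrite ISODate("...") strings inside nested dicts and lists."""
    if isinstance(node, dict):
        items = node.items()
    elif isinstance(node, list):
        items = enumerate(node)
    else:
        return
    for key, value in items:
        if isinstance(value, (dict, list)):
            fix_dates(value)
        else:
            node[key] = convert_value(value)


query = {
    "query": {"date": {"$gt": 'ISODate("2015-01-15 11:41")'}},
    "pipeline": [{"$match": {"day": 'ISODate("2015-02-01")'}}, 5, "plain string"],
}
fix_dates(query)
print(query["query"]["date"]["$gt"])  # 2015-01-15 11:41:00
```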
gh_patches_debug_1908 | rasdani/github-patches | git_diff | Gallopsled__pwntools-1660 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SSL Timeout Error immediately when switching to interactive
#### PoC
```
from pwn import *
r = remote('google.com', 443, ssl=True)
r.interactive()
r.close()
```
It immediately results in:
```
[+] Opening connection to google.com on port 443: Done
[*] Switching to interactive mode
Exception in thread Thread-2:
Traceback (most recent call last):
File "/usr/lib/python2.7/threading.py", line 801, in __bootstrap_inner
self.run()
File "/usr/lib/python2.7/threading.py", line 754, in run
self.__target(*self.__args, **self.__kwargs)
File "/home/hopkins/.local/lib/python2.7/site-packages/pwnlib/tubes/tube.py", line 784, in recv_thread
cur = self.recv(timeout = 0.05)
File "/home/hopkins/.local/lib/python2.7/site-packages/pwnlib/tubes/tube.py", line 78, in recv
return self._recv(numb, timeout) or ''
File "/home/hopkins/.local/lib/python2.7/site-packages/pwnlib/tubes/tube.py", line 156, in _recv
if not self.buffer and not self._fillbuffer(timeout):
File "/home/hopkins/.local/lib/python2.7/site-packages/pwnlib/tubes/tube.py", line 126, in _fillbuffer
data = self.recv_raw(self.buffer.get_fill_size())
File "/home/hopkins/.local/lib/python2.7/site-packages/pwnlib/tubes/sock.py", line 37, in recv_raw
data = self.sock.recv(numb, *a)
File "/usr/lib/python2.7/ssl.py", line 772, in recv
return self.read(buflen)
File "/usr/lib/python2.7/ssl.py", line 659, in read
v = self._sslobj.read(len)
SSLError: ('The read operation timed out',)
```
Note that doing so on a non-SSL server doesn't have this issue:
```
from pwn import *
r = remote('google.com', 80, ssl=False)
r.interactive()
r.close()
```
It allows you to type an HTTP request in interactive mode and returns the server's response without any issues.
```
GET /
```
```
<HTTP Responses>
```
Is the SSL feature broken in pwntools?
</issue>
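For context on the root cause: with the Python 2 `ssl` module, a read timeout on an SSL-wrapped socket surfaces as `ssl.SSLError('The read operation timed out')` rather than `socket.timeout`, so the `except socket.timeout` branch in `recv_raw` below never fires and the error escapes the receive thread. A minimal sketch of how a caller could normalise the two cases follows; the helper name and the 0.05 s timeout are illustrative only, and the substring check mirrors the one added in the diff at the end of this entry:
```
import socket
import ssl

def recv_or_none(sock, numb, timeout=0.05):
    """Return received bytes, or None on a read timeout (plain or SSL socket)."""
    sock.settimeout(timeout)
    try:
        return sock.recv(numb)
    except socket.timeout:        # plain sockets raise this on timeout
        return None
    except ssl.SSLError as e:     # Python 2 SSL sockets raise this instead
        if 'timed out' in str(e):
            return None           # treat it exactly like socket.timeout
        raise
```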
<code>
[start of pwnlib/tubes/sock.py]
1 from __future__ import absolute_import
2 from __future__ import division
3
4 import errno
5 import select
6 import six
7 import socket
8
9 from pwnlib.log import getLogger
10 from pwnlib.tubes.tube import tube
11
12 log = getLogger(__name__)
13
14 class sock(tube):
15 """Base type used for :class:`.tubes.remote` and :class:`.tubes.listen` classes"""
16
17 def __init__(self, *args, **kwargs):
18 super(sock, self).__init__(*args, **kwargs)
19 self.closed = {"recv": False, "send": False}
20
21 # Overwritten for better usability
22 def recvall(self, timeout = tube.forever):
23 """recvall() -> str
24
25 Receives data until the socket is closed.
26 """
27
28 if getattr(self, 'type', None) == socket.SOCK_DGRAM:
29 self.error("UDP sockets does not supports recvall")
30 else:
31 return super(sock, self).recvall(timeout)
32
33 def recv_raw(self, numb, *a):
34 if self.closed["recv"]:
35 raise EOFError
36
37 while True:
38 try:
39 data = self.sock.recv(numb, *a)
40 break
41 except socket.timeout:
42 return None
43 except IOError as e:
44 if e.errno == errno.EAGAIN:
45 return None
46 elif e.errno in (errno.ECONNREFUSED, errno.ECONNRESET):
47 self.shutdown("recv")
48 raise EOFError
49 elif e.errno == errno.EINTR:
50 continue
51 else:
52 raise
53
54 if not data:
55 self.shutdown("recv")
56 raise EOFError
57
58 return data
59
60 def send_raw(self, data):
61 if self.closed["send"]:
62 raise EOFError
63
64 try:
65 self.sock.sendall(data)
66 except IOError as e:
67 eof_numbers = (errno.EPIPE, errno.ECONNRESET, errno.ECONNREFUSED)
68 if e.errno in eof_numbers or 'Socket is closed' in e.args:
69 self.shutdown("send")
70 raise EOFError
71 else:
72 raise
73
74 def settimeout_raw(self, timeout):
75 if getattr(self, 'sock', None):
76 self.sock.settimeout(timeout)
77
78 def can_recv_raw(self, timeout):
79 """
80 Tests:
81
82 >>> l = listen()
83 >>> r = remote('localhost', l.lport)
84 >>> r.can_recv_raw(timeout=0)
85 False
86 >>> l.send(b'a')
87 >>> r.can_recv_raw(timeout=1)
88 True
89 >>> r.recv()
90 b'a'
91 >>> r.can_recv_raw(timeout=0)
92 False
93 >>> l.close()
94 >>> r.can_recv_raw(timeout=1)
95 False
96 >>> r.closed['recv']
97 True
98 """
99 if not self.sock or self.closed["recv"]:
100 return False
101
102 # select() will tell us data is available at EOF
103 can_recv = select.select([self.sock], [], [], timeout) == ([self.sock], [], [])
104
105 if not can_recv:
106 return False
107
108 # Ensure there's actually data, not just EOF
109 try:
110 self.recv_raw(1, socket.MSG_PEEK)
111 except EOFError:
112 return False
113
114 return True
115
116 def connected_raw(self, direction):
117 """
118 Tests:
119
120 >>> l = listen()
121 >>> r = remote('localhost', l.lport)
122 >>> r.connected()
123 True
124 >>> l.close()
125 >>> time.sleep(0.1) # Avoid race condition
126 >>> r.connected()
127 False
128 """
129 # If there's no socket, it's definitely closed
130 if not self.sock:
131 return False
132
133 # If we have noticed a connection close in a given direction before,
134 # return fast.
135 if self.closed.get(direction, False):
136 return False
137
138 # If a connection is closed in all manners, return fast
139 if all(self.closed.values()):
140 return False
141
142 # Use poll() to determine the connection state
143 want = {
144 'recv': select.POLLIN,
145 'send': select.POLLOUT,
146 'any': select.POLLIN | select.POLLOUT,
147 }[direction]
148
149 poll = select.poll()
150 poll.register(self, want | select.POLLHUP | select.POLLERR)
151
152 for fd, event in poll.poll(0):
153 if event & select.POLLHUP:
154 self.close()
155 return False
156 if event & select.POLLIN:
157 return True
158 if event & select.POLLOUT:
159 return True
160
161 return True
162
163 def close(self):
164 if not getattr(self, 'sock', None):
165 return
166
167 # Mark as closed in both directions
168 self.closed['send'] = True
169 self.closed['recv'] = True
170
171 self.sock.close()
172 self.sock = None
173 self._close_msg()
174
175 def _close_msg(self):
176 self.info('Closed connection to %s port %d' % (self.rhost, self.rport))
177
178 def fileno(self):
179 if not self.sock:
180 self.error("A closed socket does not have a file number")
181
182 return self.sock.fileno()
183
184 def shutdown_raw(self, direction):
185 if self.closed[direction]:
186 return
187
188 self.closed[direction] = True
189
190 if direction == "send":
191 try:
192 self.sock.shutdown(socket.SHUT_WR)
193 except IOError as e:
194 if e.errno == errno.ENOTCONN:
195 pass
196 else:
197 raise
198
199 if direction == "recv":
200 try:
201 self.sock.shutdown(socket.SHUT_RD)
202 except IOError as e:
203 if e.errno == errno.ENOTCONN:
204 pass
205 else:
206 raise
207
208 if False not in self.closed.values():
209 self.close()
210
211 @classmethod
212 def _get_family(cls, fam):
213 if isinstance(fam, six.integer_types):
214 pass
215 elif fam == 'any':
216 fam = socket.AF_UNSPEC
217 elif fam.lower() in ['ipv4', 'ip4', 'v4', '4']:
218 fam = socket.AF_INET
219 elif fam.lower() in ['ipv6', 'ip6', 'v6', '6']:
220 fam = socket.AF_INET6
221 else:
222 self.error("%s(): socket family %r is not supported",
223 cls.__name__,
224 fam)
225
226 return fam
227
228 @classmethod
229 def _get_type(cls, typ):
230 if isinstance(typ, six.integer_types):
231 pass
232 elif typ == "tcp":
233 typ = socket.SOCK_STREAM
234 elif typ == "udp":
235 typ = socket.SOCK_DGRAM
236 else:
237 self.error("%s(): socket type %r is not supported",
238 cls.__name__,
239 typ)
240
241 return typ
242
[end of pwnlib/tubes/sock.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pwnlib/tubes/sock.py b/pwnlib/tubes/sock.py
--- a/pwnlib/tubes/sock.py
+++ b/pwnlib/tubes/sock.py
@@ -48,6 +48,8 @@
raise EOFError
elif e.errno == errno.EINTR:
continue
+ elif 'timed out' in e.message:
+ return None
else:
raise
| {"golden_diff": "diff --git a/pwnlib/tubes/sock.py b/pwnlib/tubes/sock.py\n--- a/pwnlib/tubes/sock.py\n+++ b/pwnlib/tubes/sock.py\n@@ -48,6 +48,8 @@\n raise EOFError\n elif e.errno == errno.EINTR:\n continue\n+ elif 'timed out' in e.message:\n+ return None\n else:\n raise\n", "issue": "SSL Timeout Error immediately when switching to interactive\n#### PoC\r\n```\r\nfrom pwn import *\r\nr = remote('google.com', 443, ssl=True)\r\nr.interactive()\r\nr.close()\r\n```\r\nIt immediately results in:\r\n```\r\n[+] Opening connection to google.com on port 443: Done\r\n[*] Switching to interactive mode\r\nException in thread Thread-2:\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python2.7/threading.py\", line 801, in __bootstrap_inner\r\n self.run()\r\n File \"/usr/lib/python2.7/threading.py\", line 754, in run\r\n self.__target(*self.__args, **self.__kwargs)\r\n File \"/home/hopkins/.local/lib/python2.7/site-packages/pwnlib/tubes/tube.py\", line 784, in recv_thread\r\n cur = self.recv(timeout = 0.05)\r\n File \"/home/hopkins/.local/lib/python2.7/site-packages/pwnlib/tubes/tube.py\", line 78, in recv\r\n return self._recv(numb, timeout) or ''\r\n File \"/home/hopkins/.local/lib/python2.7/site-packages/pwnlib/tubes/tube.py\", line 156, in _recv\r\n if not self.buffer and not self._fillbuffer(timeout):\r\n File \"/home/hopkins/.local/lib/python2.7/site-packages/pwnlib/tubes/tube.py\", line 126, in _fillbuffer\r\n data = self.recv_raw(self.buffer.get_fill_size())\r\n File \"/home/hopkins/.local/lib/python2.7/site-packages/pwnlib/tubes/sock.py\", line 37, in recv_raw\r\n data = self.sock.recv(numb, *a)\r\n File \"/usr/lib/python2.7/ssl.py\", line 772, in recv\r\n return self.read(buflen)\r\n File \"/usr/lib/python2.7/ssl.py\", line 659, in read\r\n v = self._sslobj.read(len)\r\nSSLError: ('The read operation timed out',)\r\n\r\n```\r\n\r\nNote that doing so on a non-SSL server doesn't have this issue:\r\n```\r\nfrom pwn import *\r\nr = remote('google.com', 80, ssl=False)\r\nr.interactive()\r\nr.close()\r\n```\r\n\r\nIt allows you to type in HTTP Request in interactive mode, and return the server response without any issues.\r\n```\r\nGET /\r\n```\r\n```\r\n<HTTP Responses>\r\n```\r\n\r\nIs the SSL feature is broken in pwntools?\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\n\nimport errno\nimport select\nimport six\nimport socket\n\nfrom pwnlib.log import getLogger\nfrom pwnlib.tubes.tube import tube\n\nlog = getLogger(__name__)\n\nclass sock(tube):\n \"\"\"Base type used for :class:`.tubes.remote` and :class:`.tubes.listen` classes\"\"\"\n\n def __init__(self, *args, **kwargs):\n super(sock, self).__init__(*args, **kwargs)\n self.closed = {\"recv\": False, \"send\": False}\n\n # Overwritten for better usability\n def recvall(self, timeout = tube.forever):\n \"\"\"recvall() -> str\n\n Receives data until the socket is closed.\n \"\"\"\n\n if getattr(self, 'type', None) == socket.SOCK_DGRAM:\n self.error(\"UDP sockets does not supports recvall\")\n else:\n return super(sock, self).recvall(timeout)\n\n def recv_raw(self, numb, *a):\n if self.closed[\"recv\"]:\n raise EOFError\n\n while True:\n try:\n data = self.sock.recv(numb, *a)\n break\n except socket.timeout:\n return None\n except IOError as e:\n if e.errno == errno.EAGAIN:\n return None\n elif e.errno in (errno.ECONNREFUSED, errno.ECONNRESET):\n self.shutdown(\"recv\")\n raise EOFError\n elif e.errno == errno.EINTR:\n continue\n else:\n raise\n\n if not data:\n 
self.shutdown(\"recv\")\n raise EOFError\n\n return data\n\n def send_raw(self, data):\n if self.closed[\"send\"]:\n raise EOFError\n\n try:\n self.sock.sendall(data)\n except IOError as e:\n eof_numbers = (errno.EPIPE, errno.ECONNRESET, errno.ECONNREFUSED)\n if e.errno in eof_numbers or 'Socket is closed' in e.args:\n self.shutdown(\"send\")\n raise EOFError\n else:\n raise\n\n def settimeout_raw(self, timeout):\n if getattr(self, 'sock', None):\n self.sock.settimeout(timeout)\n\n def can_recv_raw(self, timeout):\n \"\"\"\n Tests:\n\n >>> l = listen()\n >>> r = remote('localhost', l.lport)\n >>> r.can_recv_raw(timeout=0)\n False\n >>> l.send(b'a')\n >>> r.can_recv_raw(timeout=1)\n True\n >>> r.recv()\n b'a'\n >>> r.can_recv_raw(timeout=0)\n False\n >>> l.close()\n >>> r.can_recv_raw(timeout=1)\n False\n >>> r.closed['recv']\n True\n \"\"\"\n if not self.sock or self.closed[\"recv\"]:\n return False\n\n # select() will tell us data is available at EOF\n can_recv = select.select([self.sock], [], [], timeout) == ([self.sock], [], [])\n\n if not can_recv:\n return False\n\n # Ensure there's actually data, not just EOF\n try:\n self.recv_raw(1, socket.MSG_PEEK)\n except EOFError:\n return False\n\n return True\n\n def connected_raw(self, direction):\n \"\"\"\n Tests:\n\n >>> l = listen()\n >>> r = remote('localhost', l.lport)\n >>> r.connected()\n True\n >>> l.close()\n >>> time.sleep(0.1) # Avoid race condition\n >>> r.connected()\n False\n \"\"\"\n # If there's no socket, it's definitely closed\n if not self.sock:\n return False\n\n # If we have noticed a connection close in a given direction before,\n # return fast.\n if self.closed.get(direction, False):\n return False\n\n # If a connection is closed in all manners, return fast\n if all(self.closed.values()):\n return False\n\n # Use poll() to determine the connection state\n want = {\n 'recv': select.POLLIN,\n 'send': select.POLLOUT,\n 'any': select.POLLIN | select.POLLOUT,\n }[direction]\n\n poll = select.poll()\n poll.register(self, want | select.POLLHUP | select.POLLERR)\n\n for fd, event in poll.poll(0):\n if event & select.POLLHUP:\n self.close()\n return False\n if event & select.POLLIN:\n return True\n if event & select.POLLOUT:\n return True\n\n return True\n\n def close(self):\n if not getattr(self, 'sock', None):\n return\n\n # Mark as closed in both directions\n self.closed['send'] = True\n self.closed['recv'] = True\n\n self.sock.close()\n self.sock = None\n self._close_msg()\n\n def _close_msg(self):\n self.info('Closed connection to %s port %d' % (self.rhost, self.rport))\n\n def fileno(self):\n if not self.sock:\n self.error(\"A closed socket does not have a file number\")\n\n return self.sock.fileno()\n\n def shutdown_raw(self, direction):\n if self.closed[direction]:\n return\n\n self.closed[direction] = True\n\n if direction == \"send\":\n try:\n self.sock.shutdown(socket.SHUT_WR)\n except IOError as e:\n if e.errno == errno.ENOTCONN:\n pass\n else:\n raise\n\n if direction == \"recv\":\n try:\n self.sock.shutdown(socket.SHUT_RD)\n except IOError as e:\n if e.errno == errno.ENOTCONN:\n pass\n else:\n raise\n\n if False not in self.closed.values():\n self.close()\n\n @classmethod\n def _get_family(cls, fam):\n if isinstance(fam, six.integer_types):\n pass\n elif fam == 'any':\n fam = socket.AF_UNSPEC\n elif fam.lower() in ['ipv4', 'ip4', 'v4', '4']:\n fam = socket.AF_INET\n elif fam.lower() in ['ipv6', 'ip6', 'v6', '6']:\n fam = socket.AF_INET6\n else:\n self.error(\"%s(): socket family %r is not supported\",\n 
cls.__name__,\n fam)\n\n return fam\n\n @classmethod\n def _get_type(cls, typ):\n if isinstance(typ, six.integer_types):\n pass\n elif typ == \"tcp\":\n typ = socket.SOCK_STREAM\n elif typ == \"udp\":\n typ = socket.SOCK_DGRAM\n else:\n self.error(\"%s(): socket type %r is not supported\",\n cls.__name__,\n typ)\n\n return typ\n", "path": "pwnlib/tubes/sock.py"}]} | 3,192 | 97 |
gh_patches_debug_5126 | rasdani/github-patches | git_diff | ipython__ipython-4092 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
nbconvert can't handle a heading with Chinese characters on a Japanese Windows OS.
Converting the following notebook with `ipython nbconvert test.ipynb` raises an exception:
```
File "C:\Python27\lib\site-packages\ipython-1.0.0_dev-py2.7.egg\IPython\nbconv
ert\filters\strings.py", line 83, in add_anchor
h = ElementTree.fromstring(py3compat.cast_bytes_py2(html))
File "C:\Python27\lib\xml\etree\ElementTree.py", line 1301, in XML
parser.feed(text)
File "C:\Python27\lib\xml\etree\ElementTree.py", line 1643, in feed
self._raiseerror(v)
File "C:\Python27\lib\xml\etree\ElementTree.py", line 1507, in _raiseerror
raise err
ParseError: not well-formed (invalid token): line 1, column 9
```
Here is the content of the notebook. I am using a Japanese Windows system, where the default encoding is:
```
In [1]: from IPython.utils import encoding
In [2]: encoding.DEFAULT_ENCODING
Out[2]: 'cp932'
```
When calling `py3compat.cast_bytes_py2(html)`, it can't convert the Chinese characters correctly.
```
{
"metadata": {
"name": ""
},
"nbformat": 3,
"nbformat_minor": 0,
"worksheets": [
{
"cells": [
{
"cell_type": "heading",
"level": 1,
"metadata": {},
"source": [
"\u6269\u5c55\u7c7b\u578b(cdef\u7c7b)"
]
},
{
"cell_type": "code",
"collapsed": false,
"input": [
"\n"
],
"language": "python",
"metadata": {},
"outputs": []
}
],
"metadata": {}
}
]
}
```
</issue>
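The failure comes from the encoding step rather than from ElementTree itself: `cast_bytes_py2(html)` without an explicit encoding falls back to the platform default (cp932 here), and the resulting bytes are not valid UTF-8, which is what `ElementTree.fromstring` assumes for byte input without an XML declaration. A small sketch of the difference, using the heading string from the notebook above (the comments describe the likely mechanism, not IPython internals); this is also why the change at the end of this entry passes `encoding='utf-8'` explicitly:
```
# -*- coding: utf-8 -*-
from xml.etree import ElementTree

heading = u'<h1>扩展类型(cdef类)</h1>'  # the heading cell from the notebook above

# Bytes produced with an explicit UTF-8 encoding always parse cleanly:
h = ElementTree.fromstring(heading.encode('utf-8'))
assert h.text == u'扩展类型(cdef类)'

# Bytes produced with the locale default (cp932 on Japanese Windows) either
# fail to encode the simplified Chinese characters or yield byte sequences
# that the parser rejects as "not well-formed (invalid token)".
# heading.encode('cp932')
```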
<code>
[start of IPython/nbconvert/filters/strings.py]
1 # coding: utf-8
2 """String filters.
3
4 Contains a collection of useful string manipulation filters for use in Jinja
5 templates.
6 """
7 #-----------------------------------------------------------------------------
8 # Copyright (c) 2013, the IPython Development Team.
9 #
10 # Distributed under the terms of the Modified BSD License.
11 #
12 # The full license is in the file COPYING.txt, distributed with this software.
13 #-----------------------------------------------------------------------------
14
15 #-----------------------------------------------------------------------------
16 # Imports
17 #-----------------------------------------------------------------------------
18
19 import os
20 import re
21 import textwrap
22 from xml.etree import ElementTree
23
24 from IPython.core.interactiveshell import InteractiveShell
25 from IPython.utils import py3compat
26
27 #-----------------------------------------------------------------------------
28 # Functions
29 #-----------------------------------------------------------------------------
30
31 __all__ = [
32 'wrap_text',
33 'html2text',
34 'add_anchor',
35 'strip_dollars',
36 'strip_files_prefix',
37 'comment_lines',
38 'get_lines',
39 'ipython2python',
40 'posix_path',
41 ]
42
43
44 def wrap_text(text, width=100):
45 """
46 Intelligently wrap text.
47 Wrap text without breaking words if possible.
48
49 Parameters
50 ----------
51 text : str
52 Text to wrap.
53 width : int, optional
54 Number of characters to wrap to, default 100.
55 """
56
57 split_text = text.split('\n')
58 wrp = map(lambda x:textwrap.wrap(x,width), split_text)
59 wrpd = map('\n'.join, wrp)
60 return '\n'.join(wrpd)
61
62
63 def html2text(element):
64 """extract inner text from html
65
66 Analog of jQuery's $(element).text()
67 """
68 if isinstance(element, py3compat.string_types):
69 element = ElementTree.fromstring(element)
70
71 text = element.text or ""
72 for child in element:
73 text += html2text(child)
74 text += (element.tail or "")
75 return text
76
77
78 def add_anchor(html):
79 """Add an anchor-link to an html header tag
80
81 For use in heading cells
82 """
83 h = ElementTree.fromstring(py3compat.cast_bytes_py2(html))
84 link = html2text(h).replace(' ', '-')
85 h.set('id', link)
86 a = ElementTree.Element("a", {"class" : "anchor-link", "href" : "#" + link})
87 a.text = u'¶'
88 h.append(a)
89
90 # Known issue of Python3.x, ElementTree.tostring() returns a byte string
91 # instead of a text string. See issue http://bugs.python.org/issue10942
92 # Workaround is to make sure the bytes are casted to a string.
93 return py3compat.decode(ElementTree.tostring(h), 'utf-8')
94
95
96 def strip_dollars(text):
97 """
98 Remove all dollar symbols from text
99
100 Parameters
101 ----------
102 text : str
103 Text to remove dollars from
104 """
105
106 return text.strip('$')
107
108
109 files_url_pattern = re.compile(r'(src|href)\=([\'"]?)files/')
110
111 def strip_files_prefix(text):
112 """
113 Fix all fake URLs that start with `files/`,
114 stripping out the `files/` prefix.
115
116 Parameters
117 ----------
118 text : str
119 Text in which to replace 'src="files/real...' with 'src="real...'
120 """
121 return files_url_pattern.sub(r"\1=\2", text)
122
123
124 def comment_lines(text, prefix='# '):
125 """
126 Build a Python comment line from input text.
127
128 Parameters
129 ----------
130 text : str
131 Text to comment out.
132 prefix : str
133 Character to append to the start of each line.
134 """
135
136 #Replace line breaks with line breaks and comment symbols.
137 #Also add a comment symbol at the beginning to comment out
138 #the first line.
139 return prefix + ('\n'+prefix).join(text.split('\n'))
140
141
142 def get_lines(text, start=None,end=None):
143 """
144 Split the input text into separate lines and then return the
145 lines that the caller is interested in.
146
147 Parameters
148 ----------
149 text : str
150 Text to parse lines from.
151 start : int, optional
152 First line to grab from.
153 end : int, optional
154 Last line to grab from.
155 """
156
157 # Split the input into lines.
158 lines = text.split("\n")
159
160 # Return the right lines.
161 return "\n".join(lines[start:end]) #re-join
162
163 def ipython2python(code):
164 """Transform IPython syntax to pure Python syntax
165
166 Parameters
167 ----------
168
169 code : str
170 IPython code, to be transformed to pure Python
171 """
172 shell = InteractiveShell.instance()
173 return shell.input_transformer_manager.transform_cell(code)
174
175 def posix_path(path):
176 """Turn a path into posix-style path/to/etc
177
178 Mainly for use in latex on Windows,
179 where native Windows paths are not allowed.
180 """
181 if os.path.sep != '/':
182 return path.replace(os.path.sep, '/')
183 return path
184
[end of IPython/nbconvert/filters/strings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/IPython/nbconvert/filters/strings.py b/IPython/nbconvert/filters/strings.py
--- a/IPython/nbconvert/filters/strings.py
+++ b/IPython/nbconvert/filters/strings.py
@@ -80,7 +80,7 @@
For use in heading cells
"""
- h = ElementTree.fromstring(py3compat.cast_bytes_py2(html))
+ h = ElementTree.fromstring(py3compat.cast_bytes_py2(html, encoding='utf-8'))
link = html2text(h).replace(' ', '-')
h.set('id', link)
a = ElementTree.Element("a", {"class" : "anchor-link", "href" : "#" + link})
| {"golden_diff": "diff --git a/IPython/nbconvert/filters/strings.py b/IPython/nbconvert/filters/strings.py\n--- a/IPython/nbconvert/filters/strings.py\n+++ b/IPython/nbconvert/filters/strings.py\n@@ -80,7 +80,7 @@\n \n For use in heading cells\n \"\"\"\n- h = ElementTree.fromstring(py3compat.cast_bytes_py2(html))\n+ h = ElementTree.fromstring(py3compat.cast_bytes_py2(html, encoding='utf-8'))\n link = html2text(h).replace(' ', '-')\n h.set('id', link)\n a = ElementTree.Element(\"a\", {\"class\" : \"anchor-link\", \"href\" : \"#\" + link})\n", "issue": "nbconvert can't handle Heading with Chinese characters on Japanese Windows OS.\nConvert following notebook by `ipython nbconvert test.ipynb` will raise Exception:\n\n```\n File \"C:\\Python27\\lib\\site-packages\\ipython-1.0.0_dev-py2.7.egg\\IPython\\nbconv\nert\\filters\\strings.py\", line 83, in add_anchor\n h = ElementTree.fromstring(py3compat.cast_bytes_py2(html))\n File \"C:\\Python27\\lib\\xml\\etree\\ElementTree.py\", line 1301, in XML\n parser.feed(text)\n File \"C:\\Python27\\lib\\xml\\etree\\ElementTree.py\", line 1643, in feed\n self._raiseerror(v)\n File \"C:\\Python27\\lib\\xml\\etree\\ElementTree.py\", line 1507, in _raiseerror\n raise err\nParseError: not well-formed (invalid token): line 1, column 9\n```\n\nHere is the content of the notebook. I am using a Japanese Windows, the default encoding is:\n\n```\nIn [1]: from IPython.utils import encoding\n\nIn [2]: encoding.DEFAULT_ENCODING\nOut[2]: 'cp932'\n```\n\nWhen call `py3compat.cast_bytes_py2(html)` it can't convert the Chinese characters correctly.\n\n```\n{\n \"metadata\": {\n \"name\": \"\"\n },\n \"nbformat\": 3,\n \"nbformat_minor\": 0,\n \"worksheets\": [\n {\n \"cells\": [\n {\n \"cell_type\": \"heading\",\n \"level\": 1,\n \"metadata\": {},\n \"source\": [\n \"\\u6269\\u5c55\\u7c7b\\u578b(cdef\\u7c7b)\"\n ]\n },\n {\n \"cell_type\": \"code\",\n \"collapsed\": false,\n \"input\": [\n \"\\n\"\n ],\n \"language\": \"python\",\n \"metadata\": {},\n \"outputs\": []\n }\n ],\n \"metadata\": {}\n }\n ]\n}\n```\n\n", "before_files": [{"content": "# coding: utf-8\n\"\"\"String filters.\n\nContains a collection of useful string manipulation filters for use in Jinja\ntemplates.\n\"\"\"\n#-----------------------------------------------------------------------------\n# Copyright (c) 2013, the IPython Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\nimport os\nimport re\nimport textwrap\nfrom xml.etree import ElementTree\n\nfrom IPython.core.interactiveshell import InteractiveShell\nfrom IPython.utils import py3compat\n\n#-----------------------------------------------------------------------------\n# Functions\n#-----------------------------------------------------------------------------\n\n__all__ = [\n 'wrap_text',\n 'html2text',\n 'add_anchor',\n 'strip_dollars',\n 'strip_files_prefix',\n 'comment_lines',\n 'get_lines',\n 'ipython2python',\n 'posix_path',\n]\n\n\ndef wrap_text(text, width=100):\n \"\"\" \n Intelligently wrap text.\n Wrap text without breaking words if possible.\n \n Parameters\n ----------\n text : str\n Text to wrap.\n width : int, optional\n Number of characters to wrap to, default 100.\n 
\"\"\"\n\n split_text = text.split('\\n')\n wrp = map(lambda x:textwrap.wrap(x,width), split_text)\n wrpd = map('\\n'.join, wrp)\n return '\\n'.join(wrpd)\n\n\ndef html2text(element):\n \"\"\"extract inner text from html\n \n Analog of jQuery's $(element).text()\n \"\"\"\n if isinstance(element, py3compat.string_types):\n element = ElementTree.fromstring(element)\n \n text = element.text or \"\"\n for child in element:\n text += html2text(child)\n text += (element.tail or \"\")\n return text\n\n\ndef add_anchor(html):\n \"\"\"Add an anchor-link to an html header tag\n \n For use in heading cells\n \"\"\"\n h = ElementTree.fromstring(py3compat.cast_bytes_py2(html))\n link = html2text(h).replace(' ', '-')\n h.set('id', link)\n a = ElementTree.Element(\"a\", {\"class\" : \"anchor-link\", \"href\" : \"#\" + link})\n a.text = u'\u00b6'\n h.append(a)\n\n # Known issue of Python3.x, ElementTree.tostring() returns a byte string\n # instead of a text string. See issue http://bugs.python.org/issue10942\n # Workaround is to make sure the bytes are casted to a string.\n return py3compat.decode(ElementTree.tostring(h), 'utf-8')\n\n\ndef strip_dollars(text):\n \"\"\"\n Remove all dollar symbols from text\n \n Parameters\n ----------\n text : str\n Text to remove dollars from\n \"\"\"\n\n return text.strip('$')\n\n\nfiles_url_pattern = re.compile(r'(src|href)\\=([\\'\"]?)files/')\n\ndef strip_files_prefix(text):\n \"\"\"\n Fix all fake URLs that start with `files/`,\n stripping out the `files/` prefix.\n \n Parameters\n ----------\n text : str\n Text in which to replace 'src=\"files/real...' with 'src=\"real...'\n \"\"\"\n return files_url_pattern.sub(r\"\\1=\\2\", text)\n\n\ndef comment_lines(text, prefix='# '):\n \"\"\"\n Build a Python comment line from input text.\n \n Parameters\n ----------\n text : str\n Text to comment out.\n prefix : str\n Character to append to the start of each line.\n \"\"\"\n \n #Replace line breaks with line breaks and comment symbols.\n #Also add a comment symbol at the beginning to comment out\n #the first line.\n return prefix + ('\\n'+prefix).join(text.split('\\n')) \n\n\ndef get_lines(text, start=None,end=None):\n \"\"\"\n Split the input text into separate lines and then return the \n lines that the caller is interested in.\n \n Parameters\n ----------\n text : str\n Text to parse lines from.\n start : int, optional\n First line to grab from.\n end : int, optional\n Last line to grab from.\n \"\"\"\n \n # Split the input into lines.\n lines = text.split(\"\\n\")\n \n # Return the right lines.\n return \"\\n\".join(lines[start:end]) #re-join\n\ndef ipython2python(code):\n \"\"\"Transform IPython syntax to pure Python syntax\n\n Parameters\n ----------\n\n code : str\n IPython code, to be transformed to pure Python\n \"\"\"\n shell = InteractiveShell.instance()\n return shell.input_transformer_manager.transform_cell(code)\n\ndef posix_path(path):\n \"\"\"Turn a path into posix-style path/to/etc\n \n Mainly for use in latex on Windows,\n where native Windows paths are not allowed.\n \"\"\"\n if os.path.sep != '/':\n return path.replace(os.path.sep, '/')\n return path\n", "path": "IPython/nbconvert/filters/strings.py"}]} | 2,537 | 159 |
gh_patches_debug_28054 | rasdani/github-patches | git_diff | scrapy__scrapy-5885 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
get_func_args does not fully work in CPython
As [shown in tests](https://github.com/scrapy/scrapy/blob/ada917307844950a81226f020b596d5932187f6e/tests/test_utils_python.py#L240-L243), `get_func_args` does not work in CPython with inputs like `str.split`, `"".join` or `itemgetter(2)`.
</issue>
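The old implementation short-circuits these callables to an empty list because `inspect.isfunction` and friends only match pure-Python functions, while `str.split`, `"".join` and `itemgetter(2)` are C-level callables. `inspect.signature`, by contrast, can introspect many of them on modern CPython and raises `ValueError` for the rest, which is what the change at the end of this entry builds on. A rough, self-contained sketch of that approach (`argument_names` is an illustrative helper, not Scrapy's API; the commented outputs are what recent CPython versions report):
```
import inspect
from operator import itemgetter

def argument_names(func):
    """List parameter names of a callable, or [] when no signature is exposed."""
    if not callable(func):
        raise TypeError(f"func must be callable, got '{type(func).__name__}'")
    try:
        sig = inspect.signature(func)
    except ValueError:
        # Some C-level callables expose no signature at all; report nothing
        # instead of raising, as the fixed get_func_args does.
        return []
    return list(sig.parameters)

print(argument_names(str.split))      # ['self', 'sep', 'maxsplit']
print(argument_names("".join))        # ['iterable'] -- self is already bound
print(argument_names(itemgetter(2)))  # ['obj'] or [], depending on the interpreter
```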
<code>
[start of scrapy/utils/python.py]
1 """
2 This module contains essential stuff that should've come with Python itself ;)
3 """
4 import gc
5 import inspect
6 import re
7 import sys
8 import weakref
9 from functools import partial, wraps
10 from itertools import chain
11 from typing import Any, AsyncGenerator, AsyncIterable, Iterable, Union
12
13 from scrapy.utils.asyncgen import as_async_generator
14
15
16 def flatten(x):
17 """flatten(sequence) -> list
18
19 Returns a single, flat list which contains all elements retrieved
20 from the sequence and all recursively contained sub-sequences
21 (iterables).
22
23 Examples:
24 >>> [1, 2, [3,4], (5,6)]
25 [1, 2, [3, 4], (5, 6)]
26 >>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, (8,9,10)])
27 [1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]
28 >>> flatten(["foo", "bar"])
29 ['foo', 'bar']
30 >>> flatten(["foo", ["baz", 42], "bar"])
31 ['foo', 'baz', 42, 'bar']
32 """
33 return list(iflatten(x))
34
35
36 def iflatten(x):
37 """iflatten(sequence) -> iterator
38
39 Similar to ``.flatten()``, but returns iterator instead"""
40 for el in x:
41 if is_listlike(el):
42 for el_ in iflatten(el):
43 yield el_
44 else:
45 yield el
46
47
48 def is_listlike(x: Any) -> bool:
49 """
50 >>> is_listlike("foo")
51 False
52 >>> is_listlike(5)
53 False
54 >>> is_listlike(b"foo")
55 False
56 >>> is_listlike([b"foo"])
57 True
58 >>> is_listlike((b"foo",))
59 True
60 >>> is_listlike({})
61 True
62 >>> is_listlike(set())
63 True
64 >>> is_listlike((x for x in range(3)))
65 True
66 >>> is_listlike(range(5))
67 True
68 """
69 return hasattr(x, "__iter__") and not isinstance(x, (str, bytes))
70
71
72 def unique(list_, key=lambda x: x):
73 """efficient function to uniquify a list preserving item order"""
74 seen = set()
75 result = []
76 for item in list_:
77 seenkey = key(item)
78 if seenkey in seen:
79 continue
80 seen.add(seenkey)
81 result.append(item)
82 return result
83
84
85 def to_unicode(text, encoding=None, errors="strict"):
86 """Return the unicode representation of a bytes object ``text``. If
87 ``text`` is already an unicode object, return it as-is."""
88 if isinstance(text, str):
89 return text
90 if not isinstance(text, (bytes, str)):
91 raise TypeError(
92 "to_unicode must receive a bytes or str "
93 f"object, got {type(text).__name__}"
94 )
95 if encoding is None:
96 encoding = "utf-8"
97 return text.decode(encoding, errors)
98
99
100 def to_bytes(text, encoding=None, errors="strict"):
101 """Return the binary representation of ``text``. If ``text``
102 is already a bytes object, return it as-is."""
103 if isinstance(text, bytes):
104 return text
105 if not isinstance(text, str):
106 raise TypeError(
107 "to_bytes must receive a str or bytes " f"object, got {type(text).__name__}"
108 )
109 if encoding is None:
110 encoding = "utf-8"
111 return text.encode(encoding, errors)
112
113
114 def re_rsearch(pattern, text, chunk_size=1024):
115 """
116 This function does a reverse search in a text using a regular expression
117 given in the attribute 'pattern'.
118 Since the re module does not provide this functionality, we have to find for
119 the expression into chunks of text extracted from the end (for the sake of efficiency).
120 At first, a chunk of 'chunk_size' kilobytes is extracted from the end, and searched for
121 the pattern. If the pattern is not found, another chunk is extracted, and another
122 search is performed.
123 This process continues until a match is found, or until the whole file is read.
124 In case the pattern wasn't found, None is returned, otherwise it returns a tuple containing
125 the start position of the match, and the ending (regarding the entire text).
126 """
127
128 def _chunk_iter():
129 offset = len(text)
130 while True:
131 offset -= chunk_size * 1024
132 if offset <= 0:
133 break
134 yield (text[offset:], offset)
135 yield (text, 0)
136
137 if isinstance(pattern, str):
138 pattern = re.compile(pattern)
139
140 for chunk, offset in _chunk_iter():
141 matches = [match for match in pattern.finditer(chunk)]
142 if matches:
143 start, end = matches[-1].span()
144 return offset + start, offset + end
145 return None
146
147
148 def memoizemethod_noargs(method):
149 """Decorator to cache the result of a method (without arguments) using a
150 weak reference to its object
151 """
152 cache = weakref.WeakKeyDictionary()
153
154 @wraps(method)
155 def new_method(self, *args, **kwargs):
156 if self not in cache:
157 cache[self] = method(self, *args, **kwargs)
158 return cache[self]
159
160 return new_method
161
162
163 _BINARYCHARS = {to_bytes(chr(i)) for i in range(32)} - {b"\0", b"\t", b"\n", b"\r"}
164 _BINARYCHARS |= {ord(ch) for ch in _BINARYCHARS}
165
166
167 def binary_is_text(data):
168 """Returns ``True`` if the given ``data`` argument (a ``bytes`` object)
169 does not contain unprintable control characters.
170 """
171 if not isinstance(data, bytes):
172 raise TypeError(f"data must be bytes, got '{type(data).__name__}'")
173 return all(c not in _BINARYCHARS for c in data)
174
175
176 def get_func_args(func, stripself=False):
177 """Return the argument name list of a callable"""
178 if inspect.isfunction(func):
179 spec = inspect.getfullargspec(func)
180 func_args = spec.args + spec.kwonlyargs
181 elif inspect.isclass(func):
182 return get_func_args(func.__init__, True)
183 elif inspect.ismethod(func):
184 return get_func_args(func.__func__, True)
185 elif inspect.ismethoddescriptor(func):
186 return []
187 elif isinstance(func, partial):
188 return [
189 x
190 for x in get_func_args(func.func)[len(func.args) :]
191 if not (func.keywords and x in func.keywords)
192 ]
193 elif hasattr(func, "__call__"):
194 if inspect.isroutine(func):
195 return []
196 if getattr(func, "__name__", None) == "__call__":
197 return []
198 return get_func_args(func.__call__, True)
199 else:
200 raise TypeError(f"{type(func)} is not callable")
201 if stripself:
202 func_args.pop(0)
203 return func_args
204
205
206 def get_spec(func):
207 """Returns (args, kwargs) tuple for a function
208 >>> import re
209 >>> get_spec(re.match)
210 (['pattern', 'string'], {'flags': 0})
211
212 >>> class Test:
213 ... def __call__(self, val):
214 ... pass
215 ... def method(self, val, flags=0):
216 ... pass
217
218 >>> get_spec(Test)
219 (['self', 'val'], {})
220
221 >>> get_spec(Test.method)
222 (['self', 'val'], {'flags': 0})
223
224 >>> get_spec(Test().method)
225 (['self', 'val'], {'flags': 0})
226 """
227
228 if inspect.isfunction(func) or inspect.ismethod(func):
229 spec = inspect.getfullargspec(func)
230 elif hasattr(func, "__call__"):
231 spec = inspect.getfullargspec(func.__call__)
232 else:
233 raise TypeError(f"{type(func)} is not callable")
234
235 defaults = spec.defaults or []
236
237 firstdefault = len(spec.args) - len(defaults)
238 args = spec.args[:firstdefault]
239 kwargs = dict(zip(spec.args[firstdefault:], defaults))
240 return args, kwargs
241
242
243 def equal_attributes(obj1, obj2, attributes):
244 """Compare two objects attributes"""
245 # not attributes given return False by default
246 if not attributes:
247 return False
248
249 temp1, temp2 = object(), object()
250 for attr in attributes:
251 # support callables like itemgetter
252 if callable(attr):
253 if attr(obj1) != attr(obj2):
254 return False
255 elif getattr(obj1, attr, temp1) != getattr(obj2, attr, temp2):
256 return False
257 # all attributes equal
258 return True
259
260
261 def without_none_values(iterable):
262 """Return a copy of ``iterable`` with all ``None`` entries removed.
263
264 If ``iterable`` is a mapping, return a dictionary where all pairs that have
265 value ``None`` have been removed.
266 """
267 try:
268 return {k: v for k, v in iterable.items() if v is not None}
269 except AttributeError:
270 return type(iterable)((v for v in iterable if v is not None))
271
272
273 def global_object_name(obj):
274 """
275 Return full name of a global object.
276
277 >>> from scrapy import Request
278 >>> global_object_name(Request)
279 'scrapy.http.request.Request'
280 """
281 return f"{obj.__module__}.{obj.__name__}"
282
283
284 if hasattr(sys, "pypy_version_info"):
285
286 def garbage_collect():
287 # Collecting weakreferences can take two collections on PyPy.
288 gc.collect()
289 gc.collect()
290
291 else:
292
293 def garbage_collect():
294 gc.collect()
295
296
297 class MutableChain(Iterable):
298 """
299 Thin wrapper around itertools.chain, allowing to add iterables "in-place"
300 """
301
302 def __init__(self, *args: Iterable):
303 self.data = chain.from_iterable(args)
304
305 def extend(self, *iterables: Iterable) -> None:
306 self.data = chain(self.data, chain.from_iterable(iterables))
307
308 def __iter__(self):
309 return self
310
311 def __next__(self):
312 return next(self.data)
313
314
315 async def _async_chain(*iterables: Union[Iterable, AsyncIterable]) -> AsyncGenerator:
316 for it in iterables:
317 async for o in as_async_generator(it):
318 yield o
319
320
321 class MutableAsyncChain(AsyncIterable):
322 """
323 Similar to MutableChain but for async iterables
324 """
325
326 def __init__(self, *args: Union[Iterable, AsyncIterable]):
327 self.data = _async_chain(*args)
328
329 def extend(self, *iterables: Union[Iterable, AsyncIterable]) -> None:
330 self.data = _async_chain(self.data, _async_chain(*iterables))
331
332 def __aiter__(self):
333 return self
334
335 async def __anext__(self):
336 return await self.data.__anext__()
337
[end of scrapy/utils/python.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scrapy/utils/python.py b/scrapy/utils/python.py
--- a/scrapy/utils/python.py
+++ b/scrapy/utils/python.py
@@ -174,33 +174,33 @@
def get_func_args(func, stripself=False):
- """Return the argument name list of a callable"""
- if inspect.isfunction(func):
- spec = inspect.getfullargspec(func)
- func_args = spec.args + spec.kwonlyargs
- elif inspect.isclass(func):
- return get_func_args(func.__init__, True)
- elif inspect.ismethod(func):
- return get_func_args(func.__func__, True)
- elif inspect.ismethoddescriptor(func):
- return []
- elif isinstance(func, partial):
- return [
- x
- for x in get_func_args(func.func)[len(func.args) :]
- if not (func.keywords and x in func.keywords)
- ]
- elif hasattr(func, "__call__"):
- if inspect.isroutine(func):
- return []
- if getattr(func, "__name__", None) == "__call__":
- return []
- return get_func_args(func.__call__, True)
+ """Return the argument name list of a callable object"""
+ if not callable(func):
+ raise TypeError(f"func must be callable, got '{type(func).__name__}'")
+
+ args = []
+ try:
+ sig = inspect.signature(func)
+ except ValueError:
+ return args
+
+ if isinstance(func, partial):
+ partial_args = func.args
+ partial_kw = func.keywords
+
+ for name, param in sig.parameters.items():
+ if param.name in partial_args:
+ continue
+ if partial_kw and param.name in partial_kw:
+ continue
+ args.append(name)
else:
- raise TypeError(f"{type(func)} is not callable")
- if stripself:
- func_args.pop(0)
- return func_args
+ for name in sig.parameters.keys():
+ args.append(name)
+
+ if stripself and args and args[0] == "self":
+ args = args[1:]
+ return args
def get_spec(func):
| {"golden_diff": "diff --git a/scrapy/utils/python.py b/scrapy/utils/python.py\n--- a/scrapy/utils/python.py\n+++ b/scrapy/utils/python.py\n@@ -174,33 +174,33 @@\n \n \n def get_func_args(func, stripself=False):\n- \"\"\"Return the argument name list of a callable\"\"\"\n- if inspect.isfunction(func):\n- spec = inspect.getfullargspec(func)\n- func_args = spec.args + spec.kwonlyargs\n- elif inspect.isclass(func):\n- return get_func_args(func.__init__, True)\n- elif inspect.ismethod(func):\n- return get_func_args(func.__func__, True)\n- elif inspect.ismethoddescriptor(func):\n- return []\n- elif isinstance(func, partial):\n- return [\n- x\n- for x in get_func_args(func.func)[len(func.args) :]\n- if not (func.keywords and x in func.keywords)\n- ]\n- elif hasattr(func, \"__call__\"):\n- if inspect.isroutine(func):\n- return []\n- if getattr(func, \"__name__\", None) == \"__call__\":\n- return []\n- return get_func_args(func.__call__, True)\n+ \"\"\"Return the argument name list of a callable object\"\"\"\n+ if not callable(func):\n+ raise TypeError(f\"func must be callable, got '{type(func).__name__}'\")\n+\n+ args = []\n+ try:\n+ sig = inspect.signature(func)\n+ except ValueError:\n+ return args\n+\n+ if isinstance(func, partial):\n+ partial_args = func.args\n+ partial_kw = func.keywords\n+\n+ for name, param in sig.parameters.items():\n+ if param.name in partial_args:\n+ continue\n+ if partial_kw and param.name in partial_kw:\n+ continue\n+ args.append(name)\n else:\n- raise TypeError(f\"{type(func)} is not callable\")\n- if stripself:\n- func_args.pop(0)\n- return func_args\n+ for name in sig.parameters.keys():\n+ args.append(name)\n+\n+ if stripself and args and args[0] == \"self\":\n+ args = args[1:]\n+ return args\n \n \n def get_spec(func):\n", "issue": "get_func_args does not fully work in CPython\nAs [shown in tests](https://github.com/scrapy/scrapy/blob/ada917307844950a81226f020b596d5932187f6e/tests/test_utils_python.py#L240-L243), `get_func_args` does not work in CPython with inputs like `str.split`, `\"\".join` or `itemgetter(2)`.\n", "before_files": [{"content": "\"\"\"\nThis module contains essential stuff that should've come with Python itself ;)\n\"\"\"\nimport gc\nimport inspect\nimport re\nimport sys\nimport weakref\nfrom functools import partial, wraps\nfrom itertools import chain\nfrom typing import Any, AsyncGenerator, AsyncIterable, Iterable, Union\n\nfrom scrapy.utils.asyncgen import as_async_generator\n\n\ndef flatten(x):\n \"\"\"flatten(sequence) -> list\n\n Returns a single, flat list which contains all elements retrieved\n from the sequence and all recursively contained sub-sequences\n (iterables).\n\n Examples:\n >>> [1, 2, [3,4], (5,6)]\n [1, 2, [3, 4], (5, 6)]\n >>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, (8,9,10)])\n [1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]\n >>> flatten([\"foo\", \"bar\"])\n ['foo', 'bar']\n >>> flatten([\"foo\", [\"baz\", 42], \"bar\"])\n ['foo', 'baz', 42, 'bar']\n \"\"\"\n return list(iflatten(x))\n\n\ndef iflatten(x):\n \"\"\"iflatten(sequence) -> iterator\n\n Similar to ``.flatten()``, but returns iterator instead\"\"\"\n for el in x:\n if is_listlike(el):\n for el_ in iflatten(el):\n yield el_\n else:\n yield el\n\n\ndef is_listlike(x: Any) -> bool:\n \"\"\"\n >>> is_listlike(\"foo\")\n False\n >>> is_listlike(5)\n False\n >>> is_listlike(b\"foo\")\n False\n >>> is_listlike([b\"foo\"])\n True\n >>> is_listlike((b\"foo\",))\n True\n >>> is_listlike({})\n True\n >>> is_listlike(set())\n True\n >>> is_listlike((x for x in range(3)))\n 
True\n >>> is_listlike(range(5))\n True\n \"\"\"\n return hasattr(x, \"__iter__\") and not isinstance(x, (str, bytes))\n\n\ndef unique(list_, key=lambda x: x):\n \"\"\"efficient function to uniquify a list preserving item order\"\"\"\n seen = set()\n result = []\n for item in list_:\n seenkey = key(item)\n if seenkey in seen:\n continue\n seen.add(seenkey)\n result.append(item)\n return result\n\n\ndef to_unicode(text, encoding=None, errors=\"strict\"):\n \"\"\"Return the unicode representation of a bytes object ``text``. If\n ``text`` is already an unicode object, return it as-is.\"\"\"\n if isinstance(text, str):\n return text\n if not isinstance(text, (bytes, str)):\n raise TypeError(\n \"to_unicode must receive a bytes or str \"\n f\"object, got {type(text).__name__}\"\n )\n if encoding is None:\n encoding = \"utf-8\"\n return text.decode(encoding, errors)\n\n\ndef to_bytes(text, encoding=None, errors=\"strict\"):\n \"\"\"Return the binary representation of ``text``. If ``text``\n is already a bytes object, return it as-is.\"\"\"\n if isinstance(text, bytes):\n return text\n if not isinstance(text, str):\n raise TypeError(\n \"to_bytes must receive a str or bytes \" f\"object, got {type(text).__name__}\"\n )\n if encoding is None:\n encoding = \"utf-8\"\n return text.encode(encoding, errors)\n\n\ndef re_rsearch(pattern, text, chunk_size=1024):\n \"\"\"\n This function does a reverse search in a text using a regular expression\n given in the attribute 'pattern'.\n Since the re module does not provide this functionality, we have to find for\n the expression into chunks of text extracted from the end (for the sake of efficiency).\n At first, a chunk of 'chunk_size' kilobytes is extracted from the end, and searched for\n the pattern. If the pattern is not found, another chunk is extracted, and another\n search is performed.\n This process continues until a match is found, or until the whole file is read.\n In case the pattern wasn't found, None is returned, otherwise it returns a tuple containing\n the start position of the match, and the ending (regarding the entire text).\n \"\"\"\n\n def _chunk_iter():\n offset = len(text)\n while True:\n offset -= chunk_size * 1024\n if offset <= 0:\n break\n yield (text[offset:], offset)\n yield (text, 0)\n\n if isinstance(pattern, str):\n pattern = re.compile(pattern)\n\n for chunk, offset in _chunk_iter():\n matches = [match for match in pattern.finditer(chunk)]\n if matches:\n start, end = matches[-1].span()\n return offset + start, offset + end\n return None\n\n\ndef memoizemethod_noargs(method):\n \"\"\"Decorator to cache the result of a method (without arguments) using a\n weak reference to its object\n \"\"\"\n cache = weakref.WeakKeyDictionary()\n\n @wraps(method)\n def new_method(self, *args, **kwargs):\n if self not in cache:\n cache[self] = method(self, *args, **kwargs)\n return cache[self]\n\n return new_method\n\n\n_BINARYCHARS = {to_bytes(chr(i)) for i in range(32)} - {b\"\\0\", b\"\\t\", b\"\\n\", b\"\\r\"}\n_BINARYCHARS |= {ord(ch) for ch in _BINARYCHARS}\n\n\ndef binary_is_text(data):\n \"\"\"Returns ``True`` if the given ``data`` argument (a ``bytes`` object)\n does not contain unprintable control characters.\n \"\"\"\n if not isinstance(data, bytes):\n raise TypeError(f\"data must be bytes, got '{type(data).__name__}'\")\n return all(c not in _BINARYCHARS for c in data)\n\n\ndef get_func_args(func, stripself=False):\n \"\"\"Return the argument name list of a callable\"\"\"\n if inspect.isfunction(func):\n spec = 
inspect.getfullargspec(func)\n func_args = spec.args + spec.kwonlyargs\n elif inspect.isclass(func):\n return get_func_args(func.__init__, True)\n elif inspect.ismethod(func):\n return get_func_args(func.__func__, True)\n elif inspect.ismethoddescriptor(func):\n return []\n elif isinstance(func, partial):\n return [\n x\n for x in get_func_args(func.func)[len(func.args) :]\n if not (func.keywords and x in func.keywords)\n ]\n elif hasattr(func, \"__call__\"):\n if inspect.isroutine(func):\n return []\n if getattr(func, \"__name__\", None) == \"__call__\":\n return []\n return get_func_args(func.__call__, True)\n else:\n raise TypeError(f\"{type(func)} is not callable\")\n if stripself:\n func_args.pop(0)\n return func_args\n\n\ndef get_spec(func):\n \"\"\"Returns (args, kwargs) tuple for a function\n >>> import re\n >>> get_spec(re.match)\n (['pattern', 'string'], {'flags': 0})\n\n >>> class Test:\n ... def __call__(self, val):\n ... pass\n ... def method(self, val, flags=0):\n ... pass\n\n >>> get_spec(Test)\n (['self', 'val'], {})\n\n >>> get_spec(Test.method)\n (['self', 'val'], {'flags': 0})\n\n >>> get_spec(Test().method)\n (['self', 'val'], {'flags': 0})\n \"\"\"\n\n if inspect.isfunction(func) or inspect.ismethod(func):\n spec = inspect.getfullargspec(func)\n elif hasattr(func, \"__call__\"):\n spec = inspect.getfullargspec(func.__call__)\n else:\n raise TypeError(f\"{type(func)} is not callable\")\n\n defaults = spec.defaults or []\n\n firstdefault = len(spec.args) - len(defaults)\n args = spec.args[:firstdefault]\n kwargs = dict(zip(spec.args[firstdefault:], defaults))\n return args, kwargs\n\n\ndef equal_attributes(obj1, obj2, attributes):\n \"\"\"Compare two objects attributes\"\"\"\n # not attributes given return False by default\n if not attributes:\n return False\n\n temp1, temp2 = object(), object()\n for attr in attributes:\n # support callables like itemgetter\n if callable(attr):\n if attr(obj1) != attr(obj2):\n return False\n elif getattr(obj1, attr, temp1) != getattr(obj2, attr, temp2):\n return False\n # all attributes equal\n return True\n\n\ndef without_none_values(iterable):\n \"\"\"Return a copy of ``iterable`` with all ``None`` entries removed.\n\n If ``iterable`` is a mapping, return a dictionary where all pairs that have\n value ``None`` have been removed.\n \"\"\"\n try:\n return {k: v for k, v in iterable.items() if v is not None}\n except AttributeError:\n return type(iterable)((v for v in iterable if v is not None))\n\n\ndef global_object_name(obj):\n \"\"\"\n Return full name of a global object.\n\n >>> from scrapy import Request\n >>> global_object_name(Request)\n 'scrapy.http.request.Request'\n \"\"\"\n return f\"{obj.__module__}.{obj.__name__}\"\n\n\nif hasattr(sys, \"pypy_version_info\"):\n\n def garbage_collect():\n # Collecting weakreferences can take two collections on PyPy.\n gc.collect()\n gc.collect()\n\nelse:\n\n def garbage_collect():\n gc.collect()\n\n\nclass MutableChain(Iterable):\n \"\"\"\n Thin wrapper around itertools.chain, allowing to add iterables \"in-place\"\n \"\"\"\n\n def __init__(self, *args: Iterable):\n self.data = chain.from_iterable(args)\n\n def extend(self, *iterables: Iterable) -> None:\n self.data = chain(self.data, chain.from_iterable(iterables))\n\n def __iter__(self):\n return self\n\n def __next__(self):\n return next(self.data)\n\n\nasync def _async_chain(*iterables: Union[Iterable, AsyncIterable]) -> AsyncGenerator:\n for it in iterables:\n async for o in as_async_generator(it):\n yield o\n\n\nclass 
MutableAsyncChain(AsyncIterable):\n \"\"\"\n Similar to MutableChain but for async iterables\n \"\"\"\n\n def __init__(self, *args: Union[Iterable, AsyncIterable]):\n self.data = _async_chain(*args)\n\n def extend(self, *iterables: Union[Iterable, AsyncIterable]) -> None:\n self.data = _async_chain(self.data, _async_chain(*iterables))\n\n def __aiter__(self):\n return self\n\n async def __anext__(self):\n return await self.data.__anext__()\n", "path": "scrapy/utils/python.py"}]} | 3,978 | 486 |
gh_patches_debug_6455 | rasdani/github-patches | git_diff | voicepaw__so-vits-svc-fork-354 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
UnpicklingError: Weights only load failed. Unpickler error: Unsupported class numpy.core.multiarray._reconstruct
**Describe the bug**
I tried to update, but starting from version 3.6.0 I get this exception during inference:
```
UnpicklingError: Weights only load failed. Re-running `torch.load` with `weights_only` set to `False` will likely succeed, but it can result in arbitrary code execution.Do it only if you get the file from a trusted source. WeightsUnpickler error: Unsupported class numpy.core.multiarray._reconstruct
```
**To Reproduce**
Steps to reproduce the behavior:
- Update so-vits-svc-fork
- Run inference
**Additional context**
Initially I updated to version 3.8.0; because of the exception, I kept downgrading the version until I reached 3.5.1, which solved the problem.
</issue>
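For context: `weights_only=True` restricts `torch.load` to an allowlist of tensor-related types, and the cluster checkpoints loaded here embed scikit-learn/NumPy state (`cluster_centers_` and friends), which is why unpickling stops at `numpy.core.multiarray._reconstruct`. A minimal sketch of the trade-off; the checkpoint path is hypothetical, the fallback pattern is not the shipped fix (the accepted change simply drops `weights_only=True`), and a full unpickle is only safe for checkpoints from a trusted source:
```
import torch

ckpt_path = "logs/44k/kmeans_10000.pt"  # hypothetical cluster checkpoint path

try:
    # Safe mode: refuses pickled NumPy/scikit-learn objects, so cluster
    # checkpoints like this one fail with the UnpicklingError from the report.
    checkpoint = torch.load(ckpt_path, map_location="cpu", weights_only=True)
except Exception:  # pickle.UnpicklingError on the affected versions
    # Full unpickling accepts those objects again, at the cost of executing
    # arbitrary pickle bytecode; only do this for trusted checkpoints.
    checkpoint = torch.load(ckpt_path, map_location="cpu")
```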
<code>
[start of src/so_vits_svc_fork/cluster/__init__.py]
1 from __future__ import annotations
2
3 from pathlib import Path
4 from typing import Any
5
6 import torch
7 from sklearn.cluster import KMeans
8
9
10 def get_cluster_model(ckpt_path: Path | str):
11 with Path(ckpt_path).open("rb") as f:
12 checkpoint = torch.load(f, map_location="cpu", weights_only=True)
13 kmeans_dict = {}
14 for spk, ckpt in checkpoint.items():
15 km = KMeans(ckpt["n_features_in_"])
16 km.__dict__["n_features_in_"] = ckpt["n_features_in_"]
17 km.__dict__["_n_threads"] = ckpt["_n_threads"]
18 km.__dict__["cluster_centers_"] = ckpt["cluster_centers_"]
19 kmeans_dict[spk] = km
20 return kmeans_dict
21
22
23 def check_speaker(model: Any, speaker: Any):
24 if speaker not in model:
25 raise ValueError(f"Speaker {speaker} not in {list(model.keys())}")
26
27
28 def get_cluster_result(model: Any, x: Any, speaker: Any):
29 """
30 x: np.array [t, 256]
31 return cluster class result
32 """
33 check_speaker(model, speaker)
34 return model[speaker].predict(x)
35
36
37 def get_cluster_center_result(model: Any, x: Any, speaker: Any):
38 """x: np.array [t, 256]"""
39 check_speaker(model, speaker)
40 predict = model[speaker].predict(x)
41 return model[speaker].cluster_centers_[predict]
42
43
44 def get_center(model: Any, x: Any, speaker: Any):
45 check_speaker(model, speaker)
46 return model[speaker].cluster_centers_[x]
47
[end of src/so_vits_svc_fork/cluster/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/so_vits_svc_fork/cluster/__init__.py b/src/so_vits_svc_fork/cluster/__init__.py
--- a/src/so_vits_svc_fork/cluster/__init__.py
+++ b/src/so_vits_svc_fork/cluster/__init__.py
@@ -9,7 +9,9 @@
def get_cluster_model(ckpt_path: Path | str):
with Path(ckpt_path).open("rb") as f:
- checkpoint = torch.load(f, map_location="cpu", weights_only=True)
+ checkpoint = torch.load(
+ f, map_location="cpu"
+ ) # Danger of arbitrary code execution
kmeans_dict = {}
for spk, ckpt in checkpoint.items():
km = KMeans(ckpt["n_features_in_"])
| {"golden_diff": "diff --git a/src/so_vits_svc_fork/cluster/__init__.py b/src/so_vits_svc_fork/cluster/__init__.py\n--- a/src/so_vits_svc_fork/cluster/__init__.py\n+++ b/src/so_vits_svc_fork/cluster/__init__.py\n@@ -9,7 +9,9 @@\n \n def get_cluster_model(ckpt_path: Path | str):\n with Path(ckpt_path).open(\"rb\") as f:\n- checkpoint = torch.load(f, map_location=\"cpu\", weights_only=True)\n+ checkpoint = torch.load(\n+ f, map_location=\"cpu\"\n+ ) # Danger of arbitrary code execution\n kmeans_dict = {}\n for spk, ckpt in checkpoint.items():\n km = KMeans(ckpt[\"n_features_in_\"])\n", "issue": "UnpicklingError: Weights only load failed. Unpickler error: Unsupported class numpy.core.multiarray._reconstruct\n**Describe the bug**\r\nI tried to update, but I got this exception start from version 3.6.0 during inference\r\n```\r\nUnpicklingError: Weights only load failed. Re-running `torch.load` with `weights_only` set to `False` will likely succeed, but it can result in arbitrary code execution.Do it only if you get the file from a trusted source. WeightsUnpickler error: Unsupported class numpy.core.multiarray._reconstruct\r\n```\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n- Update so-vits-svc-fork\r\n- Run inference\r\n\r\n**Additional context**\r\nInitially I updated to version 3.8.0, because of the exception I tried to solve by keep downgrading the version until I got to version 3.5.1 to solve the problem.\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom pathlib import Path\nfrom typing import Any\n\nimport torch\nfrom sklearn.cluster import KMeans\n\n\ndef get_cluster_model(ckpt_path: Path | str):\n with Path(ckpt_path).open(\"rb\") as f:\n checkpoint = torch.load(f, map_location=\"cpu\", weights_only=True)\n kmeans_dict = {}\n for spk, ckpt in checkpoint.items():\n km = KMeans(ckpt[\"n_features_in_\"])\n km.__dict__[\"n_features_in_\"] = ckpt[\"n_features_in_\"]\n km.__dict__[\"_n_threads\"] = ckpt[\"_n_threads\"]\n km.__dict__[\"cluster_centers_\"] = ckpt[\"cluster_centers_\"]\n kmeans_dict[spk] = km\n return kmeans_dict\n\n\ndef check_speaker(model: Any, speaker: Any):\n if speaker not in model:\n raise ValueError(f\"Speaker {speaker} not in {list(model.keys())}\")\n\n\ndef get_cluster_result(model: Any, x: Any, speaker: Any):\n \"\"\"\n x: np.array [t, 256]\n return cluster class result\n \"\"\"\n check_speaker(model, speaker)\n return model[speaker].predict(x)\n\n\ndef get_cluster_center_result(model: Any, x: Any, speaker: Any):\n \"\"\"x: np.array [t, 256]\"\"\"\n check_speaker(model, speaker)\n predict = model[speaker].predict(x)\n return model[speaker].cluster_centers_[predict]\n\n\ndef get_center(model: Any, x: Any, speaker: Any):\n check_speaker(model, speaker)\n return model[speaker].cluster_centers_[x]\n", "path": "src/so_vits_svc_fork/cluster/__init__.py"}]} | 1,204 | 179 |
gh_patches_debug_7476 | rasdani/github-patches | git_diff | Mailu__Mailu-1084 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Setup error for incorrect docker network subnet
When entering an incorrect Docker subnet (e.g. 172.168.0.1/16), the setup throws a 500 error without giving any reason.
If you run the setup locally, the error is reported clearly, but in an ungraceful way.
</issue>
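
The error ultimately comes from the standard-library call used in `setup/server.py` below: with the default `strict=True`, `ipaddress.IPv4Network` rejects an address that has host bits set, such as 172.168.0.1/16. A short sketch of that behaviour (plain CPython, no Mailu code involved):

```python
import ipaddress

try:
    ipaddress.IPv4Network("172.168.0.1/16")  # strict=True is the default
except ValueError as err:
    print(err)  # "172.168.0.1/16 has host bits set"

net = ipaddress.IPv4Network("172.168.0.1/16", strict=False)
print(net)      # 172.168.0.0/16
print(net[-2])  # 172.168.255.254, the kind of value server.py stores as data['dns']
```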
<code>
[start of setup/server.py]
1 import flask
2 import flask_bootstrap
3 import redis
4 import json
5 import os
6 import jinja2
7 import uuid
8 import string
9 import random
10 import ipaddress
11 import hashlib
12 import time
13
14
15 version = os.getenv("this_version", "master")
16 static_url_path = "/" + version + "/static"
17 app = flask.Flask(__name__, static_url_path=static_url_path)
18 flask_bootstrap.Bootstrap(app)
19 db = redis.StrictRedis(host='redis', port=6379, db=0)
20
21
22 def render_flavor(flavor, template, data):
23 return flask.render_template(
24 os.path.join(flavor, template),
25 **data
26 )
27
28
29 @app.add_template_global
30 def secret(length=16):
31 charset = string.ascii_uppercase + string.digits
32 return ''.join(
33 random.SystemRandom().choice(charset)
34 for _ in range(length)
35 )
36
37 #Original copied from https://github.com/andrewlkho/ulagen
38 def random_ipv6_subnet():
39 eui64 = uuid.getnode() >> 24 << 48 | 0xfffe000000 | uuid.getnode() & 0xffffff
40 eui64_canon = "-".join([format(eui64, "02X")[i:i+2] for i in range(0, 18, 2)])
41
42 h = hashlib.sha1()
43 h.update((eui64_canon + str(time.time() - time.mktime((1900, 1, 1, 0, 0, 0, 0, 1, -1)))).encode('utf-8'))
44 globalid = h.hexdigest()[0:10]
45
46 prefix = ":".join(("fd" + globalid[0:2], globalid[2:6], globalid[6:10]))
47 return prefix
48
49 def build_app(path):
50
51 app.jinja_env.trim_blocks = True
52 app.jinja_env.lstrip_blocks = True
53
54 @app.context_processor
55 def app_context():
56 return dict(versions=os.getenv("VERSIONS","master").split(','))
57
58 prefix_bp = flask.Blueprint(version, __name__)
59 prefix_bp.jinja_loader = jinja2.ChoiceLoader([
60 jinja2.FileSystemLoader(os.path.join(path, "templates")),
61 jinja2.FileSystemLoader(os.path.join(path, "flavors"))
62 ])
63
64 root_bp = flask.Blueprint("root", __name__)
65 root_bp.jinja_loader = jinja2.ChoiceLoader([
66 jinja2.FileSystemLoader(os.path.join(path, "templates")),
67 jinja2.FileSystemLoader(os.path.join(path, "flavors"))
68 ])
69
70 @prefix_bp.context_processor
71 @root_bp.context_processor
72 def bp_context(version=version):
73 return dict(version=version)
74
75 @prefix_bp.route("/")
76 @root_bp.route("/")
77 def wizard():
78 return flask.render_template('wizard.html')
79
80 @prefix_bp.route("/submit_flavor", methods=["POST"])
81 @root_bp.route("/submit_flavor", methods=["POST"])
82 def submit_flavor():
83 data = flask.request.form.copy()
84 subnet6 = random_ipv6_subnet()
85 steps = sorted(os.listdir(os.path.join(path, "templates", "steps", data["flavor"])))
86 return flask.render_template('wizard.html', flavor=data["flavor"], steps=steps, subnet6=subnet6)
87
88 @prefix_bp.route("/submit", methods=["POST"])
89 @root_bp.route("/submit", methods=["POST"])
90 def submit():
91 data = flask.request.form.copy()
92 data['uid'] = str(uuid.uuid4())
93 try:
94 data['dns'] = str(ipaddress.IPv4Network(data['subnet'])[-2])
95 except ValueError as err:
96 return "Error while generating files: " + str(err)
97 db.set(data['uid'], json.dumps(data))
98 return flask.redirect(flask.url_for('.setup', uid=data['uid']))
99
100 @prefix_bp.route("/setup/<uid>", methods=["GET"])
101 @root_bp.route("/setup/<uid>", methods=["GET"])
102 def setup(uid):
103 data = json.loads(db.get(uid))
104 flavor = data.get("flavor", "compose")
105 rendered = render_flavor(flavor, "setup.html", data)
106 return flask.render_template("setup.html", contents=rendered)
107
108 @prefix_bp.route("/file/<uid>/<filepath>", methods=["GET"])
109 @root_bp.route("/file/<uid>/<filepath>", methods=["GET"])
110 def file(uid, filepath):
111 data = json.loads(db.get(uid))
112 flavor = data.get("flavor", "compose")
113 return flask.Response(
114 render_flavor(flavor, filepath, data),
115 mimetype="application/text"
116 )
117
118 app.register_blueprint(prefix_bp, url_prefix="/{}".format(version))
119 app.register_blueprint(root_bp)
120
121
122 if __name__ == "__main__":
123 build_app("/tmp/mailutest")
124 app.run(debug=True)
125
[end of setup/server.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup/server.py b/setup/server.py
--- a/setup/server.py
+++ b/setup/server.py
@@ -91,7 +91,7 @@
data = flask.request.form.copy()
data['uid'] = str(uuid.uuid4())
try:
- data['dns'] = str(ipaddress.IPv4Network(data['subnet'])[-2])
+ data['dns'] = str(ipaddress.IPv4Network(data['subnet'], strict=False)[-2])
except ValueError as err:
return "Error while generating files: " + str(err)
db.set(data['uid'], json.dumps(data))
| {"golden_diff": "diff --git a/setup/server.py b/setup/server.py\n--- a/setup/server.py\n+++ b/setup/server.py\n@@ -91,7 +91,7 @@\n data = flask.request.form.copy()\n data['uid'] = str(uuid.uuid4())\n try:\n- data['dns'] = str(ipaddress.IPv4Network(data['subnet'])[-2])\n+ data['dns'] = str(ipaddress.IPv4Network(data['subnet'], strict=False)[-2])\n except ValueError as err:\n return \"Error while generating files: \" + str(err)\n db.set(data['uid'], json.dumps(data))\n", "issue": "Setup error for incorrect docker network subnet\nWhen entering an incorrect docker subnet (e.g. 172.168.0.1/16) the setup throws a 500 error -- without any reasons being given. \r\n\r\nIf you run locally, the error is clearly reported in an ungraceful way.\n", "before_files": [{"content": "import flask\nimport flask_bootstrap\nimport redis\nimport json\nimport os\nimport jinja2\nimport uuid\nimport string\nimport random\nimport ipaddress\nimport hashlib\nimport time\n\n\nversion = os.getenv(\"this_version\", \"master\")\nstatic_url_path = \"/\" + version + \"/static\"\napp = flask.Flask(__name__, static_url_path=static_url_path)\nflask_bootstrap.Bootstrap(app)\ndb = redis.StrictRedis(host='redis', port=6379, db=0)\n\n\ndef render_flavor(flavor, template, data):\n return flask.render_template(\n os.path.join(flavor, template),\n **data\n )\n\n\[email protected]_template_global\ndef secret(length=16):\n charset = string.ascii_uppercase + string.digits\n return ''.join(\n random.SystemRandom().choice(charset)\n for _ in range(length)\n )\n\n#Original copied from https://github.com/andrewlkho/ulagen\ndef random_ipv6_subnet():\n eui64 = uuid.getnode() >> 24 << 48 | 0xfffe000000 | uuid.getnode() & 0xffffff\n eui64_canon = \"-\".join([format(eui64, \"02X\")[i:i+2] for i in range(0, 18, 2)])\n\n h = hashlib.sha1()\n h.update((eui64_canon + str(time.time() - time.mktime((1900, 1, 1, 0, 0, 0, 0, 1, -1)))).encode('utf-8'))\n globalid = h.hexdigest()[0:10]\n\n prefix = \":\".join((\"fd\" + globalid[0:2], globalid[2:6], globalid[6:10]))\n return prefix\n\ndef build_app(path):\n\n app.jinja_env.trim_blocks = True\n app.jinja_env.lstrip_blocks = True\n\n @app.context_processor\n def app_context():\n return dict(versions=os.getenv(\"VERSIONS\",\"master\").split(','))\n\n prefix_bp = flask.Blueprint(version, __name__)\n prefix_bp.jinja_loader = jinja2.ChoiceLoader([\n jinja2.FileSystemLoader(os.path.join(path, \"templates\")),\n jinja2.FileSystemLoader(os.path.join(path, \"flavors\"))\n ])\n\n root_bp = flask.Blueprint(\"root\", __name__)\n root_bp.jinja_loader = jinja2.ChoiceLoader([\n jinja2.FileSystemLoader(os.path.join(path, \"templates\")),\n jinja2.FileSystemLoader(os.path.join(path, \"flavors\"))\n ])\n\n @prefix_bp.context_processor\n @root_bp.context_processor\n def bp_context(version=version):\n return dict(version=version)\n\n @prefix_bp.route(\"/\")\n @root_bp.route(\"/\")\n def wizard():\n return flask.render_template('wizard.html')\n\n @prefix_bp.route(\"/submit_flavor\", methods=[\"POST\"])\n @root_bp.route(\"/submit_flavor\", methods=[\"POST\"])\n def submit_flavor():\n data = flask.request.form.copy()\n subnet6 = random_ipv6_subnet()\n steps = sorted(os.listdir(os.path.join(path, \"templates\", \"steps\", data[\"flavor\"])))\n return flask.render_template('wizard.html', flavor=data[\"flavor\"], steps=steps, subnet6=subnet6)\n\n @prefix_bp.route(\"/submit\", methods=[\"POST\"])\n @root_bp.route(\"/submit\", methods=[\"POST\"])\n def submit():\n data = flask.request.form.copy()\n data['uid'] = str(uuid.uuid4())\n 
try:\n data['dns'] = str(ipaddress.IPv4Network(data['subnet'])[-2])\n except ValueError as err:\n return \"Error while generating files: \" + str(err)\n db.set(data['uid'], json.dumps(data))\n return flask.redirect(flask.url_for('.setup', uid=data['uid']))\n\n @prefix_bp.route(\"/setup/<uid>\", methods=[\"GET\"])\n @root_bp.route(\"/setup/<uid>\", methods=[\"GET\"])\n def setup(uid):\n data = json.loads(db.get(uid))\n flavor = data.get(\"flavor\", \"compose\")\n rendered = render_flavor(flavor, \"setup.html\", data)\n return flask.render_template(\"setup.html\", contents=rendered)\n\n @prefix_bp.route(\"/file/<uid>/<filepath>\", methods=[\"GET\"])\n @root_bp.route(\"/file/<uid>/<filepath>\", methods=[\"GET\"])\n def file(uid, filepath):\n data = json.loads(db.get(uid))\n flavor = data.get(\"flavor\", \"compose\")\n return flask.Response(\n render_flavor(flavor, filepath, data),\n mimetype=\"application/text\"\n )\n\n app.register_blueprint(prefix_bp, url_prefix=\"/{}\".format(version))\n app.register_blueprint(root_bp)\n\n\nif __name__ == \"__main__\":\n build_app(\"/tmp/mailutest\")\n app.run(debug=True)\n", "path": "setup/server.py"}]} | 1,938 | 132 |
gh_patches_debug_23312 | rasdani/github-patches | git_diff | ephios-dev__ephios-338 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Format event description
As a planner, I want to be able to format the event description. This should at least mean that links will be formatted so that they are clickable. Maybe this should mean that the description will accept Markdown.
</issue>
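
One way to read this request: run the description through Markdown, then sanitise and linkify the result, which is the tool chain that `rich_text.py` below already uses. A rough sketch with the same libraries (the allowed tag list here is shortened for illustration):

```python
import bleach
import markdown

description = "Meet at the station.\nDetails: https://example.org/briefing"

html = markdown.markdown(description, extensions=["markdown.extensions.nl2br"])
html = bleach.clean(
    html,
    tags=["a", "p", "br", "em", "strong"],
    attributes={"a": ["href", "title"]},
)
html = bleach.Linker(parse_email=True).linkify(html)  # turns the bare URL into a link
print(html)
```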
<code>
[start of ephios/extra/templatetags/rich_text.py]
1 import bleach
2 import markdown
3 from django import template
4 from django.utils.safestring import mark_safe
5
6 register = template.Library()
7
8 ALLOWED_TAGS = [
9 "a",
10 "abbr",
11 "acronym",
12 "b",
13 "blockquote",
14 "br",
15 "code",
16 "div",
17 "em",
18 "h1",
19 "h2",
20 "h3",
21 "h4",
22 "h5",
23 "h6",
24 "hr",
25 "i",
26 "li",
27 "ol",
28 "p",
29 "pre",
30 "span",
31 "strong",
32 "table",
33 "tbody",
34 "td",
35 "th",
36 "thead",
37 "tr",
38 "ul",
39 ]
40
41
42 ALLOWED_ATTRIBUTES = {
43 "a": ["href", "title", "class"],
44 "abbr": ["title"],
45 "acronym": ["title"],
46 "table": ["width"],
47 "td": ["width", "align"],
48 "div": ["class"],
49 "p": ["class"],
50 "span": ["class", "title"],
51 }
52
53 ALLOWED_PROTOCOLS = ["http", "https", "mailto", "tel"]
54
55
56 def markdown_compile(source):
57 extensions = ["markdown.extensions.sane_lists", "markdown.extensions.nl2br"]
58 return bleach.clean(
59 markdown.markdown(source, extensions=extensions),
60 tags=ALLOWED_TAGS,
61 attributes=ALLOWED_ATTRIBUTES,
62 protocols=ALLOWED_PROTOCOLS,
63 )
64
65
66 @register.filter
67 def rich_text(text: str, **kwargs):
68 """
69 Processes markdown and cleans HTML in a text input.
70 """
71 text = str(text)
72 linker = bleach.Linker(parse_email=True)
73 body_md = linker.linkify(markdown_compile(text))
74 return mark_safe(body_md)
75
[end of ephios/extra/templatetags/rich_text.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ephios/extra/templatetags/rich_text.py b/ephios/extra/templatetags/rich_text.py
--- a/ephios/extra/templatetags/rich_text.py
+++ b/ephios/extra/templatetags/rich_text.py
@@ -53,22 +53,28 @@
ALLOWED_PROTOCOLS = ["http", "https", "mailto", "tel"]
-def markdown_compile(source):
+def markdown_compile(source, excluded_tags=""):
extensions = ["markdown.extensions.sane_lists", "markdown.extensions.nl2br"]
+ tags = ALLOWED_TAGS.copy()
+ for tag in excluded_tags.split(","):
+ try:
+ tags.remove(tag)
+ except ValueError:
+ pass
return bleach.clean(
markdown.markdown(source, extensions=extensions),
- tags=ALLOWED_TAGS,
+ tags=tags,
attributes=ALLOWED_ATTRIBUTES,
protocols=ALLOWED_PROTOCOLS,
)
@register.filter
-def rich_text(text: str, **kwargs):
+def rich_text(text: str, excluded_tags=""):
"""
Processes markdown and cleans HTML in a text input.
"""
text = str(text)
linker = bleach.Linker(parse_email=True)
- body_md = linker.linkify(markdown_compile(text))
+ body_md = linker.linkify(markdown_compile(text, excluded_tags=excluded_tags))
return mark_safe(body_md)
| {"golden_diff": "diff --git a/ephios/extra/templatetags/rich_text.py b/ephios/extra/templatetags/rich_text.py\n--- a/ephios/extra/templatetags/rich_text.py\n+++ b/ephios/extra/templatetags/rich_text.py\n@@ -53,22 +53,28 @@\n ALLOWED_PROTOCOLS = [\"http\", \"https\", \"mailto\", \"tel\"]\n \n \n-def markdown_compile(source):\n+def markdown_compile(source, excluded_tags=\"\"):\n extensions = [\"markdown.extensions.sane_lists\", \"markdown.extensions.nl2br\"]\n+ tags = ALLOWED_TAGS.copy()\n+ for tag in excluded_tags.split(\",\"):\n+ try:\n+ tags.remove(tag)\n+ except ValueError:\n+ pass\n return bleach.clean(\n markdown.markdown(source, extensions=extensions),\n- tags=ALLOWED_TAGS,\n+ tags=tags,\n attributes=ALLOWED_ATTRIBUTES,\n protocols=ALLOWED_PROTOCOLS,\n )\n \n \n @register.filter\n-def rich_text(text: str, **kwargs):\n+def rich_text(text: str, excluded_tags=\"\"):\n \"\"\"\n Processes markdown and cleans HTML in a text input.\n \"\"\"\n text = str(text)\n linker = bleach.Linker(parse_email=True)\n- body_md = linker.linkify(markdown_compile(text))\n+ body_md = linker.linkify(markdown_compile(text, excluded_tags=excluded_tags))\n return mark_safe(body_md)\n", "issue": "Format event description\nAs a planner, I want to be able to format the event description. This should at least mean that links will be formatted so that they are clickable. Maybe this should mean that the description will accept Markdown.\n", "before_files": [{"content": "import bleach\nimport markdown\nfrom django import template\nfrom django.utils.safestring import mark_safe\n\nregister = template.Library()\n\nALLOWED_TAGS = [\n \"a\",\n \"abbr\",\n \"acronym\",\n \"b\",\n \"blockquote\",\n \"br\",\n \"code\",\n \"div\",\n \"em\",\n \"h1\",\n \"h2\",\n \"h3\",\n \"h4\",\n \"h5\",\n \"h6\",\n \"hr\",\n \"i\",\n \"li\",\n \"ol\",\n \"p\",\n \"pre\",\n \"span\",\n \"strong\",\n \"table\",\n \"tbody\",\n \"td\",\n \"th\",\n \"thead\",\n \"tr\",\n \"ul\",\n]\n\n\nALLOWED_ATTRIBUTES = {\n \"a\": [\"href\", \"title\", \"class\"],\n \"abbr\": [\"title\"],\n \"acronym\": [\"title\"],\n \"table\": [\"width\"],\n \"td\": [\"width\", \"align\"],\n \"div\": [\"class\"],\n \"p\": [\"class\"],\n \"span\": [\"class\", \"title\"],\n}\n\nALLOWED_PROTOCOLS = [\"http\", \"https\", \"mailto\", \"tel\"]\n\n\ndef markdown_compile(source):\n extensions = [\"markdown.extensions.sane_lists\", \"markdown.extensions.nl2br\"]\n return bleach.clean(\n markdown.markdown(source, extensions=extensions),\n tags=ALLOWED_TAGS,\n attributes=ALLOWED_ATTRIBUTES,\n protocols=ALLOWED_PROTOCOLS,\n )\n\n\[email protected]\ndef rich_text(text: str, **kwargs):\n \"\"\"\n Processes markdown and cleans HTML in a text input.\n \"\"\"\n text = str(text)\n linker = bleach.Linker(parse_email=True)\n body_md = linker.linkify(markdown_compile(text))\n return mark_safe(body_md)\n", "path": "ephios/extra/templatetags/rich_text.py"}]} | 1,125 | 314 |
gh_patches_debug_39701 | rasdani/github-patches | git_diff | scikit-image__scikit-image-4339 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[performance] Unwanted cast from float32 to double in slic
## Description
If a `float32` image is given to `skimage.segmentation.slic`, it will be cast to `double` implicitly.
In fact, in `slic`, the Cython function `_slic_cython` is called with the input image preprocessed with `img_as_float`. If the input array data type is `float32`, it is not touched.
Since the `_slic_cython` signature is
```
(double[:, :, :, ::1] image_zyx,
double[:, ::1] segments,
float step,
Py_ssize_t max_iter,
double[::1] spacing,
bint slic_zero)
```
the input image is automatically and silently cast to `double`.
Using the `np_floats` fused type as advised in #3111 solves the problem.
</issue>
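
A small reproduction sketch (exact behaviour depends on the installed scikit-image version): `img_as_float` keeps the array in `float32`, but the `double[...]` signature above forces a silent copy to `float64` once `_slic_cython` is entered.

```python
import numpy as np
from skimage.segmentation import slic
from skimage.util import img_as_float

img32 = np.random.rand(64, 64, 3).astype(np.float32)

# img_as_float leaves float32 data untouched ...
assert img_as_float(img32).dtype == np.float32

# ... yet the double[:, :, :, ::1] argument of _slic_cython still triggers a
# full float64 copy of the image, which is wasteful for large volumes.
segments = slic(img32, n_segments=100)
```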
<code>
[start of skimage/segmentation/slic_superpixels.py]
1 from collections.abc import Iterable
2 import numpy as np
3 from scipy import ndimage as ndi
4
5 from ..util import img_as_float, regular_grid
6 from ..segmentation._slic import (_slic_cython,
7 _enforce_label_connectivity_cython)
8 from ..color import rgb2lab
9
10
11 def slic(image, n_segments=100, compactness=10., max_iter=10, sigma=0,
12 spacing=None, multichannel=True, convert2lab=None,
13 enforce_connectivity=True, min_size_factor=0.5, max_size_factor=3,
14 slic_zero=False):
15 """Segments image using k-means clustering in Color-(x,y,z) space.
16
17 Parameters
18 ----------
19 image : 2D, 3D or 4D ndarray
20 Input image, which can be 2D or 3D, and grayscale or multichannel
21 (see `multichannel` parameter).
22 n_segments : int, optional
23 The (approximate) number of labels in the segmented output image.
24 compactness : float, optional
25 Balances color proximity and space proximity. Higher values give
26 more weight to space proximity, making superpixel shapes more
27 square/cubic. In SLICO mode, this is the initial compactness.
28 This parameter depends strongly on image contrast and on the
29 shapes of objects in the image. We recommend exploring possible
30 values on a log scale, e.g., 0.01, 0.1, 1, 10, 100, before
31 refining around a chosen value.
32 max_iter : int, optional
33 Maximum number of iterations of k-means.
34 sigma : float or (3,) array-like of floats, optional
35 Width of Gaussian smoothing kernel for pre-processing for each
36 dimension of the image. The same sigma is applied to each dimension in
37 case of a scalar value. Zero means no smoothing.
38 Note, that `sigma` is automatically scaled if it is scalar and a
39 manual voxel spacing is provided (see Notes section).
40 spacing : (3,) array-like of floats, optional
41 The voxel spacing along each image dimension. By default, `slic`
42 assumes uniform spacing (same voxel resolution along z, y and x).
43 This parameter controls the weights of the distances along z, y,
44 and x during k-means clustering.
45 multichannel : bool, optional
46 Whether the last axis of the image is to be interpreted as multiple
47 channels or another spatial dimension.
48 convert2lab : bool, optional
49 Whether the input should be converted to Lab colorspace prior to
50 segmentation. The input image *must* be RGB. Highly recommended.
51 This option defaults to ``True`` when ``multichannel=True`` *and*
52 ``image.shape[-1] == 3``.
53 enforce_connectivity: bool, optional
54 Whether the generated segments are connected or not
55 min_size_factor: float, optional
56 Proportion of the minimum segment size to be removed with respect
57 to the supposed segment size ```depth*width*height/n_segments```
58 max_size_factor: float, optional
59 Proportion of the maximum connected segment size. A value of 3 works
60 in most of the cases.
61 slic_zero: bool, optional
62 Run SLIC-zero, the zero-parameter mode of SLIC. [2]_
63
64 Returns
65 -------
66 labels : 2D or 3D array
67 Integer mask indicating segment labels.
68
69 Raises
70 ------
71 ValueError
72 If ``convert2lab`` is set to ``True`` but the last array
73 dimension is not of length 3.
74
75 Notes
76 -----
77 * If `sigma > 0`, the image is smoothed using a Gaussian kernel prior to
78 segmentation.
79
80 * If `sigma` is scalar and `spacing` is provided, the kernel width is
81 divided along each dimension by the spacing. For example, if ``sigma=1``
82 and ``spacing=[5, 1, 1]``, the effective `sigma` is ``[0.2, 1, 1]``. This
83 ensures sensible smoothing for anisotropic images.
84
85 * The image is rescaled to be in [0, 1] prior to processing.
86
87 * Images of shape (M, N, 3) are interpreted as 2D RGB images by default. To
88 interpret them as 3D with the last dimension having length 3, use
89 `multichannel=False`.
90
91 References
92 ----------
93 .. [1] Radhakrishna Achanta, Appu Shaji, Kevin Smith, Aurelien Lucchi,
94 Pascal Fua, and Sabine Süsstrunk, SLIC Superpixels Compared to
95 State-of-the-art Superpixel Methods, TPAMI, May 2012.
96 .. [2] http://ivrg.epfl.ch/research/superpixels#SLICO
97
98 Examples
99 --------
100 >>> from skimage.segmentation import slic
101 >>> from skimage.data import astronaut
102 >>> img = astronaut()
103 >>> segments = slic(img, n_segments=100, compactness=10)
104
105 Increasing the compactness parameter yields more square regions:
106
107 >>> segments = slic(img, n_segments=100, compactness=20)
108
109 """
110
111 image = img_as_float(image)
112 is_2d = False
113 if image.ndim == 2:
114 # 2D grayscale image
115 image = image[np.newaxis, ..., np.newaxis]
116 is_2d = True
117 elif image.ndim == 3 and multichannel:
118 # Make 2D multichannel image 3D with depth = 1
119 image = image[np.newaxis, ...]
120 is_2d = True
121 elif image.ndim == 3 and not multichannel:
122 # Add channel as single last dimension
123 image = image[..., np.newaxis]
124
125 if spacing is None:
126 spacing = np.ones(3)
127 elif isinstance(spacing, (list, tuple)):
128 spacing = np.array(spacing, dtype=np.double)
129
130 if not isinstance(sigma, Iterable):
131 sigma = np.array([sigma, sigma, sigma], dtype=np.double)
132 sigma /= spacing.astype(np.double)
133 elif isinstance(sigma, (list, tuple)):
134 sigma = np.array(sigma, dtype=np.double)
135 if (sigma > 0).any():
136 # add zero smoothing for multichannel dimension
137 sigma = list(sigma) + [0]
138 image = ndi.gaussian_filter(image, sigma)
139
140 if multichannel and (convert2lab or convert2lab is None):
141 if image.shape[-1] != 3 and convert2lab:
142 raise ValueError("Lab colorspace conversion requires a RGB image.")
143 elif image.shape[-1] == 3:
144 image = rgb2lab(image)
145
146 depth, height, width = image.shape[:3]
147
148 # initialize cluster centroids for desired number of segments
149 grid_z, grid_y, grid_x = np.mgrid[:depth, :height, :width]
150 slices = regular_grid(image.shape[:3], n_segments)
151 step_z, step_y, step_x = [int(s.step if s.step is not None else 1)
152 for s in slices]
153 segments_z = grid_z[slices]
154 segments_y = grid_y[slices]
155 segments_x = grid_x[slices]
156
157 segments_color = np.zeros(segments_z.shape + (image.shape[3],))
158 segments = np.concatenate([segments_z[..., np.newaxis],
159 segments_y[..., np.newaxis],
160 segments_x[..., np.newaxis],
161 segments_color],
162 axis=-1).reshape(-1, 3 + image.shape[3])
163 segments = np.ascontiguousarray(segments)
164
165 # we do the scaling of ratio in the same way as in the SLIC paper
166 # so the values have the same meaning
167 step = float(max((step_z, step_y, step_x)))
168 ratio = 1.0 / compactness
169
170 image = np.ascontiguousarray(image * ratio)
171
172 labels = _slic_cython(image, segments, step, max_iter, spacing, slic_zero)
173
174 if enforce_connectivity:
175 segment_size = depth * height * width / n_segments
176 min_size = int(min_size_factor * segment_size)
177 max_size = int(max_size_factor * segment_size)
178 labels = _enforce_label_connectivity_cython(labels,
179 min_size,
180 max_size)
181
182 if is_2d:
183 labels = labels[0]
184
185 return labels
186
[end of skimage/segmentation/slic_superpixels.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/skimage/segmentation/slic_superpixels.py b/skimage/segmentation/slic_superpixels.py
--- a/skimage/segmentation/slic_superpixels.py
+++ b/skimage/segmentation/slic_superpixels.py
@@ -109,6 +109,8 @@
"""
image = img_as_float(image)
+ dtype = image.dtype
+
is_2d = False
if image.ndim == 2:
# 2D grayscale image
@@ -123,15 +125,15 @@
image = image[..., np.newaxis]
if spacing is None:
- spacing = np.ones(3)
+ spacing = np.ones(3, dtype=dtype)
elif isinstance(spacing, (list, tuple)):
- spacing = np.array(spacing, dtype=np.double)
+ spacing = np.array(spacing, dtype=dtype)
if not isinstance(sigma, Iterable):
- sigma = np.array([sigma, sigma, sigma], dtype=np.double)
- sigma /= spacing.astype(np.double)
+ sigma = np.array([sigma, sigma, sigma], dtype=dtype)
+ sigma /= spacing.astype(dtype)
elif isinstance(sigma, (list, tuple)):
- sigma = np.array(sigma, dtype=np.double)
+ sigma = np.array(sigma, dtype=dtype)
if (sigma > 0).any():
# add zero smoothing for multichannel dimension
sigma = list(sigma) + [0]
@@ -146,7 +148,10 @@
depth, height, width = image.shape[:3]
# initialize cluster centroids for desired number of segments
- grid_z, grid_y, grid_x = np.mgrid[:depth, :height, :width]
+ grid_z, grid_y, grid_x = np.meshgrid(np.arange(depth, dtype=dtype),
+ np.arange(height, dtype=dtype),
+ np.arange(width, dtype=dtype),
+ indexing='ij')
slices = regular_grid(image.shape[:3], n_segments)
step_z, step_y, step_x = [int(s.step if s.step is not None else 1)
for s in slices]
@@ -154,7 +159,8 @@
segments_y = grid_y[slices]
segments_x = grid_x[slices]
- segments_color = np.zeros(segments_z.shape + (image.shape[3],))
+ segments_color = np.zeros(segments_z.shape + (image.shape[3],),
+ dtype=dtype)
segments = np.concatenate([segments_z[..., np.newaxis],
segments_y[..., np.newaxis],
segments_x[..., np.newaxis],
@@ -164,8 +170,8 @@
# we do the scaling of ratio in the same way as in the SLIC paper
# so the values have the same meaning
- step = float(max((step_z, step_y, step_x)))
- ratio = 1.0 / compactness
+ step = dtype.type(max((step_z, step_y, step_x)))
+ ratio = dtype.type(1.0 / compactness)
image = np.ascontiguousarray(image * ratio)
| {"golden_diff": "diff --git a/skimage/segmentation/slic_superpixels.py b/skimage/segmentation/slic_superpixels.py\n--- a/skimage/segmentation/slic_superpixels.py\n+++ b/skimage/segmentation/slic_superpixels.py\n@@ -109,6 +109,8 @@\n \"\"\"\n \n image = img_as_float(image)\n+ dtype = image.dtype\n+\n is_2d = False\n if image.ndim == 2:\n # 2D grayscale image\n@@ -123,15 +125,15 @@\n image = image[..., np.newaxis]\n \n if spacing is None:\n- spacing = np.ones(3)\n+ spacing = np.ones(3, dtype=dtype)\n elif isinstance(spacing, (list, tuple)):\n- spacing = np.array(spacing, dtype=np.double)\n+ spacing = np.array(spacing, dtype=dtype)\n \n if not isinstance(sigma, Iterable):\n- sigma = np.array([sigma, sigma, sigma], dtype=np.double)\n- sigma /= spacing.astype(np.double)\n+ sigma = np.array([sigma, sigma, sigma], dtype=dtype)\n+ sigma /= spacing.astype(dtype)\n elif isinstance(sigma, (list, tuple)):\n- sigma = np.array(sigma, dtype=np.double)\n+ sigma = np.array(sigma, dtype=dtype)\n if (sigma > 0).any():\n # add zero smoothing for multichannel dimension\n sigma = list(sigma) + [0]\n@@ -146,7 +148,10 @@\n depth, height, width = image.shape[:3]\n \n # initialize cluster centroids for desired number of segments\n- grid_z, grid_y, grid_x = np.mgrid[:depth, :height, :width]\n+ grid_z, grid_y, grid_x = np.meshgrid(np.arange(depth, dtype=dtype),\n+ np.arange(height, dtype=dtype),\n+ np.arange(width, dtype=dtype),\n+ indexing='ij')\n slices = regular_grid(image.shape[:3], n_segments)\n step_z, step_y, step_x = [int(s.step if s.step is not None else 1)\n for s in slices]\n@@ -154,7 +159,8 @@\n segments_y = grid_y[slices]\n segments_x = grid_x[slices]\n \n- segments_color = np.zeros(segments_z.shape + (image.shape[3],))\n+ segments_color = np.zeros(segments_z.shape + (image.shape[3],),\n+ dtype=dtype)\n segments = np.concatenate([segments_z[..., np.newaxis],\n segments_y[..., np.newaxis],\n segments_x[..., np.newaxis],\n@@ -164,8 +170,8 @@\n \n # we do the scaling of ratio in the same way as in the SLIC paper\n # so the values have the same meaning\n- step = float(max((step_z, step_y, step_x)))\n- ratio = 1.0 / compactness\n+ step = dtype.type(max((step_z, step_y, step_x)))\n+ ratio = dtype.type(1.0 / compactness)\n \n image = np.ascontiguousarray(image * ratio)\n", "issue": "[performance] Unwanted cast from float32 to double in slic\n## Description\r\n\r\nIf a `float32` image is given to `skimage.segmentation.slic`, it will be cast to `double` implicitly.\r\n\r\nIn fact, in `slic`, the Cython function `_slic_cython` is called with the input image preprocessed with `img_as_float`. 
If the input array data type is `float32`, it is not touched.\r\nSince the `_slic_cython` signature is \r\n```\r\n (double[:, :, :, ::1] image_zyx,\r\n double[:, ::1] segments,\r\n float step,\r\n Py_ssize_t max_iter,\r\n double[::1] spacing,\r\n bint slic_zero)\r\n```\r\nthe input image is automatically and silently cast to `double`.\r\n\r\nUsing the `np_floats` fused type as advised in #3111 solves the problem.\r\n\n", "before_files": [{"content": "from collections.abc import Iterable\nimport numpy as np\nfrom scipy import ndimage as ndi\n\nfrom ..util import img_as_float, regular_grid\nfrom ..segmentation._slic import (_slic_cython,\n _enforce_label_connectivity_cython)\nfrom ..color import rgb2lab\n\n\ndef slic(image, n_segments=100, compactness=10., max_iter=10, sigma=0,\n spacing=None, multichannel=True, convert2lab=None,\n enforce_connectivity=True, min_size_factor=0.5, max_size_factor=3,\n slic_zero=False):\n \"\"\"Segments image using k-means clustering in Color-(x,y,z) space.\n\n Parameters\n ----------\n image : 2D, 3D or 4D ndarray\n Input image, which can be 2D or 3D, and grayscale or multichannel\n (see `multichannel` parameter).\n n_segments : int, optional\n The (approximate) number of labels in the segmented output image.\n compactness : float, optional\n Balances color proximity and space proximity. Higher values give\n more weight to space proximity, making superpixel shapes more\n square/cubic. In SLICO mode, this is the initial compactness.\n This parameter depends strongly on image contrast and on the\n shapes of objects in the image. We recommend exploring possible\n values on a log scale, e.g., 0.01, 0.1, 1, 10, 100, before\n refining around a chosen value.\n max_iter : int, optional\n Maximum number of iterations of k-means.\n sigma : float or (3,) array-like of floats, optional\n Width of Gaussian smoothing kernel for pre-processing for each\n dimension of the image. The same sigma is applied to each dimension in\n case of a scalar value. Zero means no smoothing.\n Note, that `sigma` is automatically scaled if it is scalar and a\n manual voxel spacing is provided (see Notes section).\n spacing : (3,) array-like of floats, optional\n The voxel spacing along each image dimension. By default, `slic`\n assumes uniform spacing (same voxel resolution along z, y and x).\n This parameter controls the weights of the distances along z, y,\n and x during k-means clustering.\n multichannel : bool, optional\n Whether the last axis of the image is to be interpreted as multiple\n channels or another spatial dimension.\n convert2lab : bool, optional\n Whether the input should be converted to Lab colorspace prior to\n segmentation. The input image *must* be RGB. Highly recommended.\n This option defaults to ``True`` when ``multichannel=True`` *and*\n ``image.shape[-1] == 3``.\n enforce_connectivity: bool, optional\n Whether the generated segments are connected or not\n min_size_factor: float, optional\n Proportion of the minimum segment size to be removed with respect\n to the supposed segment size ```depth*width*height/n_segments```\n max_size_factor: float, optional\n Proportion of the maximum connected segment size. A value of 3 works\n in most of the cases.\n slic_zero: bool, optional\n Run SLIC-zero, the zero-parameter mode of SLIC. 
[2]_\n\n Returns\n -------\n labels : 2D or 3D array\n Integer mask indicating segment labels.\n\n Raises\n ------\n ValueError\n If ``convert2lab`` is set to ``True`` but the last array\n dimension is not of length 3.\n\n Notes\n -----\n * If `sigma > 0`, the image is smoothed using a Gaussian kernel prior to\n segmentation.\n\n * If `sigma` is scalar and `spacing` is provided, the kernel width is\n divided along each dimension by the spacing. For example, if ``sigma=1``\n and ``spacing=[5, 1, 1]``, the effective `sigma` is ``[0.2, 1, 1]``. This\n ensures sensible smoothing for anisotropic images.\n\n * The image is rescaled to be in [0, 1] prior to processing.\n\n * Images of shape (M, N, 3) are interpreted as 2D RGB images by default. To\n interpret them as 3D with the last dimension having length 3, use\n `multichannel=False`.\n\n References\n ----------\n .. [1] Radhakrishna Achanta, Appu Shaji, Kevin Smith, Aurelien Lucchi,\n Pascal Fua, and Sabine S\u00fcsstrunk, SLIC Superpixels Compared to\n State-of-the-art Superpixel Methods, TPAMI, May 2012.\n .. [2] http://ivrg.epfl.ch/research/superpixels#SLICO\n\n Examples\n --------\n >>> from skimage.segmentation import slic\n >>> from skimage.data import astronaut\n >>> img = astronaut()\n >>> segments = slic(img, n_segments=100, compactness=10)\n\n Increasing the compactness parameter yields more square regions:\n\n >>> segments = slic(img, n_segments=100, compactness=20)\n\n \"\"\"\n\n image = img_as_float(image)\n is_2d = False\n if image.ndim == 2:\n # 2D grayscale image\n image = image[np.newaxis, ..., np.newaxis]\n is_2d = True\n elif image.ndim == 3 and multichannel:\n # Make 2D multichannel image 3D with depth = 1\n image = image[np.newaxis, ...]\n is_2d = True\n elif image.ndim == 3 and not multichannel:\n # Add channel as single last dimension\n image = image[..., np.newaxis]\n\n if spacing is None:\n spacing = np.ones(3)\n elif isinstance(spacing, (list, tuple)):\n spacing = np.array(spacing, dtype=np.double)\n\n if not isinstance(sigma, Iterable):\n sigma = np.array([sigma, sigma, sigma], dtype=np.double)\n sigma /= spacing.astype(np.double)\n elif isinstance(sigma, (list, tuple)):\n sigma = np.array(sigma, dtype=np.double)\n if (sigma > 0).any():\n # add zero smoothing for multichannel dimension\n sigma = list(sigma) + [0]\n image = ndi.gaussian_filter(image, sigma)\n\n if multichannel and (convert2lab or convert2lab is None):\n if image.shape[-1] != 3 and convert2lab:\n raise ValueError(\"Lab colorspace conversion requires a RGB image.\")\n elif image.shape[-1] == 3:\n image = rgb2lab(image)\n\n depth, height, width = image.shape[:3]\n\n # initialize cluster centroids for desired number of segments\n grid_z, grid_y, grid_x = np.mgrid[:depth, :height, :width]\n slices = regular_grid(image.shape[:3], n_segments)\n step_z, step_y, step_x = [int(s.step if s.step is not None else 1)\n for s in slices]\n segments_z = grid_z[slices]\n segments_y = grid_y[slices]\n segments_x = grid_x[slices]\n\n segments_color = np.zeros(segments_z.shape + (image.shape[3],))\n segments = np.concatenate([segments_z[..., np.newaxis],\n segments_y[..., np.newaxis],\n segments_x[..., np.newaxis],\n segments_color],\n axis=-1).reshape(-1, 3 + image.shape[3])\n segments = np.ascontiguousarray(segments)\n\n # we do the scaling of ratio in the same way as in the SLIC paper\n # so the values have the same meaning\n step = float(max((step_z, step_y, step_x)))\n ratio = 1.0 / compactness\n\n image = np.ascontiguousarray(image * ratio)\n\n labels = 
_slic_cython(image, segments, step, max_iter, spacing, slic_zero)\n\n if enforce_connectivity:\n segment_size = depth * height * width / n_segments\n min_size = int(min_size_factor * segment_size)\n max_size = int(max_size_factor * segment_size)\n labels = _enforce_label_connectivity_cython(labels,\n min_size,\n max_size)\n\n if is_2d:\n labels = labels[0]\n\n return labels\n", "path": "skimage/segmentation/slic_superpixels.py"}]} | 3,045 | 700 |
gh_patches_debug_30302 | rasdani/github-patches | git_diff | wagtail__wagtail-10860 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Please add a progress bar to "wagtail_update_image_renditions" management command
I love this new management command to regenerate all image renditions, `wagtail_update_image_renditions`. But the websites I have had to run it on often contain many thousands of images, so the command needs several hours to complete. I would love to see some kind of **progress feedback** in the terminal to get an idea of how long the task will take. This could be text-based or even a bar, like the ones we know from packages such as "tqdm".
</issue>
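
One lightweight way to get that feedback is to wrap the rendition loop in `handle()` (shown in the code below) with `tqdm`; the names used here (`renditions`, `rendition_ids`, `num_renditions`, `options`) are the ones already defined in that method, so this is a drop-in sketch rather than a complete command.

```python
from tqdm import tqdm

for rendition in tqdm(
    renditions.filter(id__in=rendition_ids)
    .select_related("image")
    .iterator(chunk_size=options["chunk_size"]),
    total=num_renditions,
    desc="Regenerating renditions",
):
    ...  # existing purge/regenerate logic goes here
```

Note that this adds a dependency on `tqdm`; a hand-rolled percentage printer works just as well if a new dependency is unwanted.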
<code>
[start of wagtail/images/management/commands/wagtail_update_image_renditions.py]
1 import logging
2
3 from django.core.management.base import BaseCommand
4 from django.db import transaction
5
6 from wagtail.images import get_image_model
7
8 logger = logging.getLogger(__name__)
9
10
11 class Command(BaseCommand):
12 """Command to create missing image renditions with the option to remove (purge) any existing ones."""
13
14 help = "This command will generate all image renditions, with an option to purge existing renditions first."
15
16 def add_arguments(self, parser):
17 parser.add_argument(
18 "--purge-only",
19 action="store_true",
20 help="Purge all image renditions without regenerating them",
21 )
22 parser.add_argument(
23 "--chunk-size",
24 type=int,
25 default=50,
26 help="Operate in x size chunks (default: %(default)s)",
27 )
28
29 def handle(self, *args, **options):
30 Rendition = get_image_model().get_rendition_model()
31
32 renditions = Rendition.objects.all()
33
34 purge_only = options["purge_only"]
35
36 if not renditions.exists():
37 self.stdout.write(self.style.WARNING("No image renditions found."))
38 return
39
40 rendition_ids = list(renditions.values_list("id", flat=True))
41 num_renditions = len(rendition_ids)
42
43 if purge_only:
44 self.stdout.write(
45 self.style.HTTP_INFO(f"Purging {num_renditions} rendition(s)")
46 )
47 else:
48 self.stdout.write(
49 self.style.HTTP_INFO(f"Regenerating {num_renditions} rendition(s)")
50 )
51
52 for rendition in (
53 # Pre-calculate the ids of the renditions to change,
54 # otherwise `.iterator` never ends.
55 renditions.filter(id__in=rendition_ids)
56 .select_related("image")
57 .iterator(chunk_size=options["chunk_size"])
58 ):
59 try:
60 with transaction.atomic():
61 rendition_filter = rendition.filter
62 rendition_image = rendition.image
63
64 # Delete the existing rendition
65 rendition.delete()
66
67 if not purge_only:
68 # Create a new one
69 rendition_image.get_rendition(rendition_filter)
70 except: # noqa:E722
71 logger.exception("Error operating on rendition %d", rendition.id)
72 self.stderr.write(
73 self.style.ERROR(f"Failed to operate on rendition {rendition.id}")
74 )
75 num_renditions -= 1
76
77 if num_renditions:
78 self.stdout.write(
79 self.style.SUCCESS(
80 f"Successfully processed {num_renditions} rendition(s)"
81 )
82 )
83 else:
84 self.stdout.write(self.style.WARNING("Could not process any renditions."))
85
[end of wagtail/images/management/commands/wagtail_update_image_renditions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wagtail/images/management/commands/wagtail_update_image_renditions.py b/wagtail/images/management/commands/wagtail_update_image_renditions.py
--- a/wagtail/images/management/commands/wagtail_update_image_renditions.py
+++ b/wagtail/images/management/commands/wagtail_update_image_renditions.py
@@ -8,6 +8,17 @@
logger = logging.getLogger(__name__)
+def progress_bar(current, total, bar_length=50):
+ fraction = current / total
+
+ arrow = int(fraction * bar_length - 1) * "-" + ">"
+ padding = int(bar_length - len(arrow)) * " "
+
+ ending = "\n" if current == total else "\r"
+
+ return (f"Progress: [{arrow}{padding}] {int(fraction*100)}%", ending)
+
+
class Command(BaseCommand):
"""Command to create missing image renditions with the option to remove (purge) any existing ones."""
@@ -49,6 +60,7 @@
self.style.HTTP_INFO(f"Regenerating {num_renditions} rendition(s)")
)
+ progress_bar_current = 1
for rendition in (
# Pre-calculate the ids of the renditions to change,
# otherwise `.iterator` never ends.
@@ -64,6 +76,10 @@
# Delete the existing rendition
rendition.delete()
+ _progress_bar = progress_bar(progress_bar_current, num_renditions)
+ self.stdout.write(_progress_bar[0], ending=_progress_bar[1])
+ progress_bar_current = progress_bar_current + 1
+
if not purge_only:
# Create a new one
rendition_image.get_rendition(rendition_filter)
| {"golden_diff": "diff --git a/wagtail/images/management/commands/wagtail_update_image_renditions.py b/wagtail/images/management/commands/wagtail_update_image_renditions.py\n--- a/wagtail/images/management/commands/wagtail_update_image_renditions.py\n+++ b/wagtail/images/management/commands/wagtail_update_image_renditions.py\n@@ -8,6 +8,17 @@\n logger = logging.getLogger(__name__)\n \n \n+def progress_bar(current, total, bar_length=50):\n+ fraction = current / total\n+\n+ arrow = int(fraction * bar_length - 1) * \"-\" + \">\"\n+ padding = int(bar_length - len(arrow)) * \" \"\n+\n+ ending = \"\\n\" if current == total else \"\\r\"\n+\n+ return (f\"Progress: [{arrow}{padding}] {int(fraction*100)}%\", ending)\n+\n+\n class Command(BaseCommand):\n \"\"\"Command to create missing image renditions with the option to remove (purge) any existing ones.\"\"\"\n \n@@ -49,6 +60,7 @@\n self.style.HTTP_INFO(f\"Regenerating {num_renditions} rendition(s)\")\n )\n \n+ progress_bar_current = 1\n for rendition in (\n # Pre-calculate the ids of the renditions to change,\n # otherwise `.iterator` never ends.\n@@ -64,6 +76,10 @@\n # Delete the existing rendition\n rendition.delete()\n \n+ _progress_bar = progress_bar(progress_bar_current, num_renditions)\n+ self.stdout.write(_progress_bar[0], ending=_progress_bar[1])\n+ progress_bar_current = progress_bar_current + 1\n+\n if not purge_only:\n # Create a new one\n rendition_image.get_rendition(rendition_filter)\n", "issue": "Please add a progress bar to \"wagtail_update_image_renditions\" management command\nI love this new management command to regenerate all image renditions `wagtail_update_image_renditions`. But often the websites I had to use it on had many thousands of images and this command would need some hours to complete. I would love to see some kind of **progress feedback** in the terminal to get an idea of the time this task might need to complete. 
This could be text based of even a bar, like we know from packages like \"tqdm\".\n", "before_files": [{"content": "import logging\n\nfrom django.core.management.base import BaseCommand\nfrom django.db import transaction\n\nfrom wagtail.images import get_image_model\n\nlogger = logging.getLogger(__name__)\n\n\nclass Command(BaseCommand):\n \"\"\"Command to create missing image renditions with the option to remove (purge) any existing ones.\"\"\"\n\n help = \"This command will generate all image renditions, with an option to purge existing renditions first.\"\n\n def add_arguments(self, parser):\n parser.add_argument(\n \"--purge-only\",\n action=\"store_true\",\n help=\"Purge all image renditions without regenerating them\",\n )\n parser.add_argument(\n \"--chunk-size\",\n type=int,\n default=50,\n help=\"Operate in x size chunks (default: %(default)s)\",\n )\n\n def handle(self, *args, **options):\n Rendition = get_image_model().get_rendition_model()\n\n renditions = Rendition.objects.all()\n\n purge_only = options[\"purge_only\"]\n\n if not renditions.exists():\n self.stdout.write(self.style.WARNING(\"No image renditions found.\"))\n return\n\n rendition_ids = list(renditions.values_list(\"id\", flat=True))\n num_renditions = len(rendition_ids)\n\n if purge_only:\n self.stdout.write(\n self.style.HTTP_INFO(f\"Purging {num_renditions} rendition(s)\")\n )\n else:\n self.stdout.write(\n self.style.HTTP_INFO(f\"Regenerating {num_renditions} rendition(s)\")\n )\n\n for rendition in (\n # Pre-calculate the ids of the renditions to change,\n # otherwise `.iterator` never ends.\n renditions.filter(id__in=rendition_ids)\n .select_related(\"image\")\n .iterator(chunk_size=options[\"chunk_size\"])\n ):\n try:\n with transaction.atomic():\n rendition_filter = rendition.filter\n rendition_image = rendition.image\n\n # Delete the existing rendition\n rendition.delete()\n\n if not purge_only:\n # Create a new one\n rendition_image.get_rendition(rendition_filter)\n except: # noqa:E722\n logger.exception(\"Error operating on rendition %d\", rendition.id)\n self.stderr.write(\n self.style.ERROR(f\"Failed to operate on rendition {rendition.id}\")\n )\n num_renditions -= 1\n\n if num_renditions:\n self.stdout.write(\n self.style.SUCCESS(\n f\"Successfully processed {num_renditions} rendition(s)\"\n )\n )\n else:\n self.stdout.write(self.style.WARNING(\"Could not process any renditions.\"))\n", "path": "wagtail/images/management/commands/wagtail_update_image_renditions.py"}]} | 1,399 | 397 |
gh_patches_debug_19081 | rasdani/github-patches | git_diff | nvaccess__nvda-11609 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Visual Studio IntelliSense overly verbose
To reproduce this issue:
1. I created a console application
2. In the main method I started typing "Console", and it reads OK.
3. When I press "." to access "WriteLine" or another method, it reads the whole line, which gets very confusing and annoying when lines get longer. Imagine pressing the up or down arrows: for every item in the IntelliSense list it repeats the whole line of code and only then reads the item itself.
For example, if the line is following:
string name = string.format("Hello {0}", textField.text);
When I get to the point where I type "textField" and then ".", it will repeat the whole line and then read the "text" property, and it repeats this every time I move up or down using the arrow keys.
It is worth mentioning that this issue is not present when using Freedom Scientific JAWS, but NVDA is a much better choice for blind developers.
I also reported this issue to Microsoft: https://developercommunity.visualstudio.com/content/problem/164719/nvda-screen-reader-is-not-working-properly-with-in.html
</issue>
<code>
[start of source/NVDAObjects/UIA/VisualStudio.py]
1 # This file is covered by the GNU General Public License.
2 # See the file COPYING for more details.
3 # Copyright (C) 2020 NV Access Limited, Leonard de Ruijter
4
5 """
6 Object overlay classes for Visual Studio components
7 available in Visual Studio and SQL Server Management Studio.
8 """
9
10 from . import UIA
11 import speech
12 import braille
13 import api
14
15
16 class IntelliSenseItem(UIA):
17
18 def _get_name(self):
19 return self.UIAElement.cachedAutomationID
20
21 def event_UIA_elementSelected(self):
22 # Cancel speech to have speech announce the selection as soon as possible.
23 # This is needed because L{reportFocus} does not cancel speech.
24 # Therefore, if speech wouldn't be cancelled,
25 # selection announcements would queue up when changing selection rapidly.
26 speech.cancelSpeech()
27 api.setNavigatorObject(self, isFocus=True)
28 self.reportFocus()
29 # Display results as flash messages.
30 braille.handler.message(braille.getPropertiesBraille(
31 name=self.name, role=self.role, positionInfo=self.positionInfo, description=self.description
32 ))
33
34
35 class IntelliSenseList(UIA):
36 ...
37
38
39 def findExtraOverlayClasses(obj, clsList):
40 if obj.UIAElement.cachedAutomationId == "listBoxCompletions":
41 clsList.insert(0, IntelliSenseList)
42 elif isinstance(obj.parent, IntelliSenseList) and obj.UIAElement.cachedClassName == "IntellisenseMenuItem":
43 clsList.insert(0, IntelliSenseItem)
44
[end of source/NVDAObjects/UIA/VisualStudio.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/source/NVDAObjects/UIA/VisualStudio.py b/source/NVDAObjects/UIA/VisualStudio.py
--- a/source/NVDAObjects/UIA/VisualStudio.py
+++ b/source/NVDAObjects/UIA/VisualStudio.py
@@ -36,8 +36,31 @@
...
+class IntelliSenseLiveRegion(UIA):
+ """
+ Visual Studio uses both Intellisense menu item objects and a live region
+ to communicate Intellisense selections.
+ NVDA uses the menu item approach and therefore the live region provides doubled information
+ and is disabled.
+ """
+
+ _shouldAllowUIALiveRegionChangeEvent = False
+
+
+_INTELLISENSE_LIST_AUTOMATION_IDS = {
+ "listBoxCompletions",
+ "CompletionList"
+}
+
+
def findExtraOverlayClasses(obj, clsList):
- if obj.UIAElement.cachedAutomationId == "listBoxCompletions":
+ if obj.UIAAutomationId in _INTELLISENSE_LIST_AUTOMATION_IDS:
clsList.insert(0, IntelliSenseList)
elif isinstance(obj.parent, IntelliSenseList) and obj.UIAElement.cachedClassName == "IntellisenseMenuItem":
clsList.insert(0, IntelliSenseItem)
+ elif (
+ obj.UIAElement.cachedClassName == "LiveTextBlock"
+ and obj.previous
+ and isinstance(obj.previous.previous, IntelliSenseList)
+ ):
+ clsList.insert(0, IntelliSenseLiveRegion)
| {"golden_diff": "diff --git a/source/NVDAObjects/UIA/VisualStudio.py b/source/NVDAObjects/UIA/VisualStudio.py\n--- a/source/NVDAObjects/UIA/VisualStudio.py\n+++ b/source/NVDAObjects/UIA/VisualStudio.py\n@@ -36,8 +36,31 @@\n \t...\n \n \n+class IntelliSenseLiveRegion(UIA):\n+\t\"\"\"\n+\tVisual Studio uses both Intellisense menu item objects and a live region\n+\tto communicate Intellisense selections.\n+\tNVDA uses the menu item approach and therefore the live region provides doubled information\n+\tand is disabled.\n+\t\"\"\"\n+\n+\t_shouldAllowUIALiveRegionChangeEvent = False\n+\n+\n+_INTELLISENSE_LIST_AUTOMATION_IDS = {\n+\t\"listBoxCompletions\",\n+\t\"CompletionList\"\n+}\n+\n+\n def findExtraOverlayClasses(obj, clsList):\n-\tif obj.UIAElement.cachedAutomationId == \"listBoxCompletions\":\n+\tif obj.UIAAutomationId in _INTELLISENSE_LIST_AUTOMATION_IDS:\n \t\tclsList.insert(0, IntelliSenseList)\n \telif isinstance(obj.parent, IntelliSenseList) and obj.UIAElement.cachedClassName == \"IntellisenseMenuItem\":\n \t\tclsList.insert(0, IntelliSenseItem)\n+\telif (\n+\t\tobj.UIAElement.cachedClassName == \"LiveTextBlock\"\n+\t\tand obj.previous\n+\t\tand isinstance(obj.previous.previous, IntelliSenseList)\n+\t):\n+\t\tclsList.insert(0, IntelliSenseLiveRegion)\n", "issue": "Visual Studio IntelliSense overly verbose\nTo reproduce this issue:\r\n1. I created a console application\r\n2. In the main method I started typing \"Console\" and it reads ok\r\n3. When I press \".\" to access \"WriteLine\" or other method it reads the whole line. Which gets very confusing and anoying when lines get longer. Imagine pressing up or down arrows and for each item in the Intelli sense it repeats the whole line of code and lastly reads the item in the IntelliSense.\r\nFor example, if the line is following:\r\nstring name = string.format(\"Hello {0}\", textField.text);\r\n\r\nWhen I got to the point when I type \"textField\" and than \".\" it will repeat the whole line and than read the \"text\" property, and it repeats if I move up or down using arrow keys.\r\n\r\nIt is worth mentioning that this issue is not present when using Freedom scientific JAWS, but NVDA is much better choice for blind developers.\r\n\r\nI also reported this issue to the Microsoft: https://developercommunity.visualstudio.com/content/problem/164719/nvda-screen-reader-is-not-working-properly-with-in.html\n", "before_files": [{"content": "# This file is covered by the GNU General Public License.\n# See the file COPYING for more details.\n# Copyright (C) 2020 NV Access Limited, Leonard de Ruijter\n\n\"\"\"\nObject overlay classes for Visual Studio components\navailable in Visual Studio and SQL Server Management Studio.\n\"\"\"\n\nfrom . 
import UIA\nimport speech\nimport braille\nimport api\n\n\nclass IntelliSenseItem(UIA):\n\n\tdef _get_name(self):\n\t\treturn self.UIAElement.cachedAutomationID\n\n\tdef event_UIA_elementSelected(self):\n\t\t# Cancel speech to have speech announce the selection as soon as possible.\n\t\t# This is needed because L{reportFocus} does not cancel speech.\n\t\t# Therefore, if speech wouldn't be cancelled,\n\t\t# selection announcements would queue up when changing selection rapidly.\n\t\tspeech.cancelSpeech()\n\t\tapi.setNavigatorObject(self, isFocus=True)\n\t\tself.reportFocus()\n\t\t# Display results as flash messages.\n\t\tbraille.handler.message(braille.getPropertiesBraille(\n\t\t\tname=self.name, role=self.role, positionInfo=self.positionInfo, description=self.description\n\t\t))\n\n\nclass IntelliSenseList(UIA):\n\t...\n\n\ndef findExtraOverlayClasses(obj, clsList):\n\tif obj.UIAElement.cachedAutomationId == \"listBoxCompletions\":\n\t\tclsList.insert(0, IntelliSenseList)\n\telif isinstance(obj.parent, IntelliSenseList) and obj.UIAElement.cachedClassName == \"IntellisenseMenuItem\":\n\t\tclsList.insert(0, IntelliSenseItem)\n", "path": "source/NVDAObjects/UIA/VisualStudio.py"}]} | 1,194 | 336 |
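An aside on the pattern used by the diff above: the new `IntelliSenseLiveRegion` overlay class silences the duplicated announcement purely through a class-level flag. The sketch below illustrates that flag-gating pattern in isolation; the `dispatch_live_region_change` function is hypothetical and is not NVDA's real event plumbing.

```python
# Illustrative only: a base class exposes a class-level flag, a subclass
# flips it, and the event dispatcher consults the flag before announcing.
class UIABase:
    _shouldAllowUIALiveRegionChangeEvent = True  # default: announce changes

class IntelliSenseLiveRegion(UIABase):
    # The completion menu item already announces the selection, so the
    # live region would only duplicate it.
    _shouldAllowUIALiveRegionChangeEvent = False

def dispatch_live_region_change(obj, announce):
    # Hypothetical dispatcher: skip objects that opted out via the flag.
    if not obj._shouldAllowUIALiveRegionChangeEvent:
        return
    announce(obj)
```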
gh_patches_debug_47517 | rasdani/github-patches | git_diff | conda-forge__conda-smithy-1727 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Python 3 regression: Undefined Jinja2 variables get rendered as empty string in linting
### Solution to issue cannot be found in the documentation.
- [X] I checked the documentation.
### Issue
For linting, undefined Jinja2 variables get rendered by `conda_smithy.utils.NullUndefined`. That class contains a `__unicode__` method that returns the name of the variable. This is useful for putting a clear placeholder where variables will be filled in by variants from `conda_build_config.yaml` during the actual build. However, `NullUndefined` doesn't override the `__str__` method of Jinja's own `Undefined`, which returns an empty string.
In effect, linting in a Python 2 environment renders, e.g. `- {{ libjpeg }}` as `- libjpeg`, but in a Python 3 environment, we get `- ` which becomes `None` in the `requirements_section` dictionary.
### Installed packages
```shell
-
```
### Environment info
```shell
-
```
</issue>
<code>
[start of conda_smithy/utils.py]
1 import shutil
2 import tempfile
3 import io
4 import jinja2
5 import datetime
6 import time
7 import os
8 import sys
9 from pathlib import Path
10 from collections import defaultdict
11 from contextlib import contextmanager
12
13 import ruamel.yaml
14
15
16 def get_feedstock_name_from_meta(meta):
17 """Resolve the feedtstock name from the parsed meta.yaml."""
18 if "feedstock-name" in meta.meta["extra"]:
19 return meta.meta["extra"]["feedstock-name"]
20 elif "parent_recipe" in meta.meta["extra"]:
21 return meta.meta["extra"]["parent_recipe"]["name"]
22 else:
23 return meta.name()
24
25
26 def get_feedstock_about_from_meta(meta) -> dict:
27 """Fetch the feedtstock about from the parsed meta.yaml."""
28 # it turns out that conda_build would not preserve the feedstock about:
29 # - if a subpackage does not have about, it uses the feedstock's
30 # - if a subpackage has about, it's used as is
31 # therefore we need to parse the yaml again just to get the about section...
32 if "parent_recipe" in meta.meta["extra"]:
33 recipe_meta = os.path.join(
34 meta.meta["extra"]["parent_recipe"]["path"], "meta.yaml"
35 )
36 with io.open(recipe_meta, "rt") as fh:
37 content = render_meta_yaml("".join(fh))
38 meta = get_yaml().load(content)
39 return dict(meta["about"])
40 else:
41 # no parent recipe for any reason, use self's about
42 return dict(meta.meta["about"])
43
44
45 def get_yaml():
46 # define global yaml API
47 # roundrip-loader and allowing duplicate keys
48 # for handling # [filter] / # [not filter]
49 # Don't use a global variable for this as a global
50 # variable will make conda-smithy thread unsafe.
51 yaml = ruamel.yaml.YAML(typ="rt")
52 yaml.allow_duplicate_keys = True
53 return yaml
54
55
56 @contextmanager
57 def tmp_directory():
58 tmp_dir = tempfile.mkdtemp("_recipe")
59 yield tmp_dir
60 shutil.rmtree(tmp_dir)
61
62
63 class NullUndefined(jinja2.Undefined):
64 def __unicode__(self):
65 return self._undefined_name
66
67 def __getattr__(self, name):
68 return "{}.{}".format(self, name)
69
70 def __getitem__(self, name):
71 return '{}["{}"]'.format(self, name)
72
73
74 class MockOS(dict):
75 def __init__(self):
76 self.environ = defaultdict(lambda: "")
77 self.sep = "/"
78
79
80 def stub_compatible_pin(*args, **kwargs):
81 return f"compatible_pin {args[0]}"
82
83
84 def stub_subpackage_pin(*args, **kwargs):
85 return f"subpackage_pin {args[0]}"
86
87
88 def render_meta_yaml(text):
89 env = jinja2.Environment(undefined=NullUndefined)
90
91 # stub out cb3 jinja2 functions - they are not important for linting
92 # if we don't stub them out, the ruamel.yaml load fails to interpret them
93 # we can't just use conda-build's api.render functionality, because it would apply selectors
94 env.globals.update(
95 dict(
96 compiler=lambda x: x + "_compiler_stub",
97 pin_subpackage=stub_subpackage_pin,
98 pin_compatible=stub_compatible_pin,
99 cdt=lambda *args, **kwargs: "cdt_stub",
100 load_file_regex=lambda *args, **kwargs: defaultdict(lambda: ""),
101 datetime=datetime,
102 time=time,
103 target_platform="linux-64",
104 mpi="mpi",
105 )
106 )
107 mockos = MockOS()
108 py_ver = "3.7"
109 context = {"os": mockos, "environ": mockos.environ, "PY_VER": py_ver}
110 content = env.from_string(text).render(context)
111 return content
112
113
114 @contextmanager
115 def update_conda_forge_config(forge_yaml):
116 """Utility method used to update conda forge configuration files
117
118 Uage:
119 >>> with update_conda_forge_config(somepath) as cfg:
120 ... cfg['foo'] = 'bar'
121 """
122 if os.path.exists(forge_yaml):
123 with open(forge_yaml, "r") as fh:
124 code = get_yaml().load(fh)
125 else:
126 code = {}
127
128 # Code could come in as an empty list.
129 if not code:
130 code = {}
131
132 yield code
133
134 get_yaml().dump(code, Path(forge_yaml))
135
136
137 def merge_dict(src, dest):
138 """Recursive merge dictionary"""
139 for key, value in src.items():
140 if isinstance(value, dict):
141 # get node or create one
142 node = dest.setdefault(key, {})
143 merge_dict(value, node)
144 else:
145 dest[key] = value
146
147 return dest
148
[end of conda_smithy/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/conda_smithy/utils.py b/conda_smithy/utils.py
--- a/conda_smithy/utils.py
+++ b/conda_smithy/utils.py
@@ -61,7 +61,7 @@
class NullUndefined(jinja2.Undefined):
- def __unicode__(self):
+ def __str__(self):
return self._undefined_name
def __getattr__(self, name):
| {"golden_diff": "diff --git a/conda_smithy/utils.py b/conda_smithy/utils.py\n--- a/conda_smithy/utils.py\n+++ b/conda_smithy/utils.py\n@@ -61,7 +61,7 @@\n \n \n class NullUndefined(jinja2.Undefined):\n- def __unicode__(self):\n+ def __str__(self):\n return self._undefined_name\n \n def __getattr__(self, name):\n", "issue": "Python 3 regression: Undefined Jinja2 variables get rendered as empty string in linting\n### Solution to issue cannot be found in the documentation.\n\n- [X] I checked the documentation.\n\n### Issue\n\nFor linting, undefined Jinja2 variables get rendered by `conda_smithy.utils.NullUndefined`. That class contains a `__unicode__` method that returns the name of the variable. This is useful to put a clear placeholder where variables will be filled by variants from `conda_build_config.yaml` during the actual build. However, `NullUndefined` doesn't overwrite the `__str__` method of Jinja's own `Undefined`, which returns an empty string.\r\n\r\nIn effect, linting in a Python 2 environment renders, e.g. `- {{ libjpeg }}` as `- libjpeg`, but in a Python 3 environment, we get `- ` which becomes `None` in the `requirements_section` dictionary.\n\n### Installed packages\n\n```shell\n-\n```\n\n\n### Environment info\n\n```shell\n-\n```\n\n", "before_files": [{"content": "import shutil\nimport tempfile\nimport io\nimport jinja2\nimport datetime\nimport time\nimport os\nimport sys\nfrom pathlib import Path\nfrom collections import defaultdict\nfrom contextlib import contextmanager\n\nimport ruamel.yaml\n\n\ndef get_feedstock_name_from_meta(meta):\n \"\"\"Resolve the feedtstock name from the parsed meta.yaml.\"\"\"\n if \"feedstock-name\" in meta.meta[\"extra\"]:\n return meta.meta[\"extra\"][\"feedstock-name\"]\n elif \"parent_recipe\" in meta.meta[\"extra\"]:\n return meta.meta[\"extra\"][\"parent_recipe\"][\"name\"]\n else:\n return meta.name()\n\n\ndef get_feedstock_about_from_meta(meta) -> dict:\n \"\"\"Fetch the feedtstock about from the parsed meta.yaml.\"\"\"\n # it turns out that conda_build would not preserve the feedstock about:\n # - if a subpackage does not have about, it uses the feedstock's\n # - if a subpackage has about, it's used as is\n # therefore we need to parse the yaml again just to get the about section...\n if \"parent_recipe\" in meta.meta[\"extra\"]:\n recipe_meta = os.path.join(\n meta.meta[\"extra\"][\"parent_recipe\"][\"path\"], \"meta.yaml\"\n )\n with io.open(recipe_meta, \"rt\") as fh:\n content = render_meta_yaml(\"\".join(fh))\n meta = get_yaml().load(content)\n return dict(meta[\"about\"])\n else:\n # no parent recipe for any reason, use self's about\n return dict(meta.meta[\"about\"])\n\n\ndef get_yaml():\n # define global yaml API\n # roundrip-loader and allowing duplicate keys\n # for handling # [filter] / # [not filter]\n # Don't use a global variable for this as a global\n # variable will make conda-smithy thread unsafe.\n yaml = ruamel.yaml.YAML(typ=\"rt\")\n yaml.allow_duplicate_keys = True\n return yaml\n\n\n@contextmanager\ndef tmp_directory():\n tmp_dir = tempfile.mkdtemp(\"_recipe\")\n yield tmp_dir\n shutil.rmtree(tmp_dir)\n\n\nclass NullUndefined(jinja2.Undefined):\n def __unicode__(self):\n return self._undefined_name\n\n def __getattr__(self, name):\n return \"{}.{}\".format(self, name)\n\n def __getitem__(self, name):\n return '{}[\"{}\"]'.format(self, name)\n\n\nclass MockOS(dict):\n def __init__(self):\n self.environ = defaultdict(lambda: \"\")\n self.sep = \"/\"\n\n\ndef stub_compatible_pin(*args, **kwargs):\n return 
f\"compatible_pin {args[0]}\"\n\n\ndef stub_subpackage_pin(*args, **kwargs):\n return f\"subpackage_pin {args[0]}\"\n\n\ndef render_meta_yaml(text):\n env = jinja2.Environment(undefined=NullUndefined)\n\n # stub out cb3 jinja2 functions - they are not important for linting\n # if we don't stub them out, the ruamel.yaml load fails to interpret them\n # we can't just use conda-build's api.render functionality, because it would apply selectors\n env.globals.update(\n dict(\n compiler=lambda x: x + \"_compiler_stub\",\n pin_subpackage=stub_subpackage_pin,\n pin_compatible=stub_compatible_pin,\n cdt=lambda *args, **kwargs: \"cdt_stub\",\n load_file_regex=lambda *args, **kwargs: defaultdict(lambda: \"\"),\n datetime=datetime,\n time=time,\n target_platform=\"linux-64\",\n mpi=\"mpi\",\n )\n )\n mockos = MockOS()\n py_ver = \"3.7\"\n context = {\"os\": mockos, \"environ\": mockos.environ, \"PY_VER\": py_ver}\n content = env.from_string(text).render(context)\n return content\n\n\n@contextmanager\ndef update_conda_forge_config(forge_yaml):\n \"\"\"Utility method used to update conda forge configuration files\n\n Uage:\n >>> with update_conda_forge_config(somepath) as cfg:\n ... cfg['foo'] = 'bar'\n \"\"\"\n if os.path.exists(forge_yaml):\n with open(forge_yaml, \"r\") as fh:\n code = get_yaml().load(fh)\n else:\n code = {}\n\n # Code could come in as an empty list.\n if not code:\n code = {}\n\n yield code\n\n get_yaml().dump(code, Path(forge_yaml))\n\n\ndef merge_dict(src, dest):\n \"\"\"Recursive merge dictionary\"\"\"\n for key, value in src.items():\n if isinstance(value, dict):\n # get node or create one\n node = dest.setdefault(key, {})\n merge_dict(value, node)\n else:\n dest[key] = value\n\n return dest\n", "path": "conda_smithy/utils.py"}]} | 2,123 | 95 |
gh_patches_debug_566 | rasdani/github-patches | git_diff | pex-tool__pex-797 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.0.1
On the docket:
+ [x] pex --index-url=... fails in 2.0.0 #794
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = '2.0.0'
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = '2.0.0'
+__version__ = '2.0.1'
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = '2.0.0'\n+__version__ = '2.0.1'\n", "issue": "Release 2.0.1\nOn the docket:\r\n\r\n+ [x] pex --index-url=... fails in 2.0.0 #794\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '2.0.0'\n", "path": "pex/version.py"}]} | 618 | 94 |
gh_patches_debug_56452 | rasdani/github-patches | git_diff | netket__netket-111 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Python bindings for Jastrow machines randomly failing
I realized in #91 that once in a while the Python tests for the `Jastrow` machines fail. This issue seems related to some memory problem, but I still don't understand if it is on the C++ side or the Python side.
</issue>
<code>
[start of setup.py]
1 import os
2 import re
3 import sys
4 import platform
5 import subprocess
6
7 from setuptools import setup, Extension
8 from setuptools.command.build_ext import build_ext
9 from distutils.version import LooseVersion
10
11
12 class CMakeExtension(Extension):
13 def __init__(self, name, sourcedir=''):
14 Extension.__init__(self, name, sources=[])
15 self.sourcedir = os.path.abspath(sourcedir)
16
17
18 class CMakeBuild(build_ext):
19 def run(self):
20 try:
21 out = subprocess.check_output(['cmake', '--version'])
22 except OSError:
23 raise RuntimeError("CMake must be installed to build the following extensions: " +
24 ", ".join(e.name for e in self.extensions))
25
26 if platform.system() == "Windows":
27 cmake_version = LooseVersion(re.search(r'version\s*([\d.]+)', out.decode()).group(1))
28 if cmake_version < '3.1.0':
29 raise RuntimeError("CMake >= 3.1.0 is required on Windows")
30
31 for ext in self.extensions:
32 self.build_extension(ext)
33
34 def build_extension(self, ext):
35 extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
36 cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
37 '-DPYTHON_EXECUTABLE=' + sys.executable]
38
39 cfg = 'Debug' if self.debug else 'Release'
40 build_args = ['--config', cfg]
41
42 if platform.system() == "Windows":
43 cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir)]
44 if sys.maxsize > 2**32:
45 cmake_args += ['-A', 'x64']
46 build_args += ['--', '/m']
47 else:
48 cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
49 build_args += ['--', '-j2']
50
51 env = os.environ.copy()
52 env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(env.get('CXXFLAGS', ''),
53 self.distribution.get_version())
54 if not os.path.exists(self.build_temp):
55 os.makedirs(self.build_temp)
56 subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env)
57 subprocess.check_call(['cmake', '--build', '.'] + build_args, cwd=self.build_temp)
58
59 setup(
60 name='netket',
61 version='0.1',
62 author='Giuseppe Carleo et al.',
63 description='NetKet',
64 url='http://github.com/netket/netket',
65 author_email='[email protected]',
66 license='Apache',
67 ext_modules=[CMakeExtension('netket')],
68 cmdclass=dict(build_ext=CMakeBuild),
69 zip_safe=False,
70 )
71
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -58,7 +58,7 @@
setup(
name='netket',
- version='0.1',
+ version='2.0',
author='Giuseppe Carleo et al.',
description='NetKet',
url='http://github.com/netket/netket',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -58,7 +58,7 @@\n \n setup(\n name='netket',\n- version='0.1',\n+ version='2.0',\n author='Giuseppe Carleo et al.',\n description='NetKet',\n url='http://github.com/netket/netket',\n", "issue": "Python bindings for Jastrow machines randomly failing\nI realized in #91 that once in a while the python tests for the `Jastrow` machines fail. This issue seems related to some memory problem, but I still don't understand if it is on the c++ side or python \n", "before_files": [{"content": "import os\nimport re\nimport sys\nimport platform\nimport subprocess\n\nfrom setuptools import setup, Extension\nfrom setuptools.command.build_ext import build_ext\nfrom distutils.version import LooseVersion\n\n\nclass CMakeExtension(Extension):\n def __init__(self, name, sourcedir=''):\n Extension.__init__(self, name, sources=[])\n self.sourcedir = os.path.abspath(sourcedir)\n\n\nclass CMakeBuild(build_ext):\n def run(self):\n try:\n out = subprocess.check_output(['cmake', '--version'])\n except OSError:\n raise RuntimeError(\"CMake must be installed to build the following extensions: \" +\n \", \".join(e.name for e in self.extensions))\n\n if platform.system() == \"Windows\":\n cmake_version = LooseVersion(re.search(r'version\\s*([\\d.]+)', out.decode()).group(1))\n if cmake_version < '3.1.0':\n raise RuntimeError(\"CMake >= 3.1.0 is required on Windows\")\n\n for ext in self.extensions:\n self.build_extension(ext)\n\n def build_extension(self, ext):\n extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))\n cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,\n '-DPYTHON_EXECUTABLE=' + sys.executable]\n\n cfg = 'Debug' if self.debug else 'Release'\n build_args = ['--config', cfg]\n\n if platform.system() == \"Windows\":\n cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir)]\n if sys.maxsize > 2**32:\n cmake_args += ['-A', 'x64']\n build_args += ['--', '/m']\n else:\n cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]\n build_args += ['--', '-j2']\n\n env = os.environ.copy()\n env['CXXFLAGS'] = '{} -DVERSION_INFO=\\\\\"{}\\\\\"'.format(env.get('CXXFLAGS', ''),\n self.distribution.get_version())\n if not os.path.exists(self.build_temp):\n os.makedirs(self.build_temp)\n subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env)\n subprocess.check_call(['cmake', '--build', '.'] + build_args, cwd=self.build_temp)\n\nsetup(\n name='netket',\n version='0.1',\n author='Giuseppe Carleo et al.',\n description='NetKet',\n url='http://github.com/netket/netket',\n author_email='[email protected]',\n license='Apache',\n ext_modules=[CMakeExtension('netket')],\n cmdclass=dict(build_ext=CMakeBuild),\n zip_safe=False,\n)\n", "path": "setup.py"}]} | 1,318 | 86 |
gh_patches_debug_20708 | rasdani/github-patches | git_diff | cfpb__consumerfinance.gov-457 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
little typo on The Bureau page
http://beta.consumerfinance.gov/the-bureau/
"Organizaitonal structure and leadership >"
should be Organizational
</issue>
<code>
[start of _lib/wordpress_post_processor.py]
1 import sys
2 import json
3 import os.path
4 import requests
5 from string import Template
6
7 import dateutil.parser
8
9 def posts_at_url(url):
10
11 current_page = 1
12 max_page = sys.maxint
13
14 while current_page <= max_page:
15
16 url = os.path.expandvars(url)
17 resp = requests.get(url, params={'page':current_page, 'count': '-1'})
18 results = json.loads(resp.content)
19 current_page += 1
20 max_page = results['pages']
21 total = 0
22 for p in results['posts']:
23 total += 1
24 yield p
25
26 def documents(name, url, **kwargs):
27
28 for post in posts_at_url(url):
29 yield process_post(post)
30
31
32 def process_post(post, newsroom = False):
33 del post['comments']
34 post['_id'] = post['slug']
35 # remove fields we're not interested in
36 if post['type'] == 'cfpb_newsroom':
37 post['category'] = [cat['title'].replace('&', '&') for cat in post['taxonomy_cfpb_newsroom_cat_taxonomy']]
38 elif post['type'] == 'post':
39 post['category'] = [cat['title'].replace('&', '&') for cat in post['taxonomy_fj_category']]
40 if post['type'] == 'watchroom':
41 post['author'] = [post['author']['name']]
42 # convert watchroom_data_x into a proper list
43 links = []
44 for x in xrange(0,10):
45 custom_fields = post['custom_fields']
46 key = 'watchroom_data_%s_link' % x
47 if key in custom_fields:
48 links.append(custom_fields[key])
49 post['links'] = links
50 else:
51 post['tags'] = [tag['title'] for tag in post['taxonomy_fj_tag']]
52 post['author'] = [author['title'] for author in
53 post['taxonomy_fj_author'] if 'Press Release' not in
54 post['category']]
55 if newsroom and post['type'] == 'post':
56 post['category'][0] = "Blog"
57 author_template = Template("$first_name $last_name")
58 dt = dateutil.parser.parse(post['date'])
59 dt_string = dt.strftime('%Y-%m-%dT%H:%M:%SZ')
60 post['date'] = dt_string
61 if 'twtr_text' in post['custom_fields']:
62 post['twtr_text'] = post['custom_fields']['twtr_text'][0]
63 if 'twtr_lang' in post['custom_fields']:
64 post['twtr_lang'] = post['custom_fields']['twtr_lang'][0]
65 if 'twtr_rel' in post['custom_fields']:
66 post['twtr_rel'] = post['custom_fields']['twtr_rel'][0]
67 if 'twtr_hash' in post['custom_fields']:
68 post['twtr_hash'] = post['custom_fields']['twtr_hash'][0]
69 return post
70
[end of _lib/wordpress_post_processor.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/_lib/wordpress_post_processor.py b/_lib/wordpress_post_processor.py
--- a/_lib/wordpress_post_processor.py
+++ b/_lib/wordpress_post_processor.py
@@ -37,13 +37,13 @@
post['category'] = [cat['title'].replace('&', '&') for cat in post['taxonomy_cfpb_newsroom_cat_taxonomy']]
elif post['type'] == 'post':
post['category'] = [cat['title'].replace('&', '&') for cat in post['taxonomy_fj_category']]
- if post['type'] == 'watchroom':
+ if post['type'] == 'featured_topic':
post['author'] = [post['author']['name']]
- # convert watchroom_data_x into a proper list
+ # convert featured_topic_data_x into a proper list
links = []
for x in xrange(0,10):
custom_fields = post['custom_fields']
- key = 'watchroom_data_%s_link' % x
+ key = 'featured_topic_data_%s_link' % x
if key in custom_fields:
links.append(custom_fields[key])
post['links'] = links
| {"golden_diff": "diff --git a/_lib/wordpress_post_processor.py b/_lib/wordpress_post_processor.py\n--- a/_lib/wordpress_post_processor.py\n+++ b/_lib/wordpress_post_processor.py\n@@ -37,13 +37,13 @@\n post['category'] = [cat['title'].replace('&', '&') for cat in post['taxonomy_cfpb_newsroom_cat_taxonomy']]\n elif post['type'] == 'post':\n post['category'] = [cat['title'].replace('&', '&') for cat in post['taxonomy_fj_category']]\n- if post['type'] == 'watchroom':\n+ if post['type'] == 'featured_topic':\n post['author'] = [post['author']['name']]\n- # convert watchroom_data_x into a proper list\n+ # convert featured_topic_data_x into a proper list\n links = []\n for x in xrange(0,10):\n custom_fields = post['custom_fields']\n- key = 'watchroom_data_%s_link' % x\n+ key = 'featured_topic_data_%s_link' % x\n if key in custom_fields:\n links.append(custom_fields[key])\n post['links'] = links\n", "issue": "little typo on The Bureau page\nhttp://beta.consumerfinance.gov/the-bureau/\n\n\"Organizaitonal structure and leadership >\"\n\nshould be Organizational\n\n", "before_files": [{"content": "import sys\nimport json\nimport os.path\nimport requests\nfrom string import Template\n\nimport dateutil.parser\n\ndef posts_at_url(url):\n \n current_page = 1\n max_page = sys.maxint\n\n while current_page <= max_page:\n\n url = os.path.expandvars(url)\n resp = requests.get(url, params={'page':current_page, 'count': '-1'})\n results = json.loads(resp.content) \n current_page += 1\n max_page = results['pages']\n total = 0\n for p in results['posts']:\n total += 1\n yield p\n\ndef documents(name, url, **kwargs):\n \n for post in posts_at_url(url):\n yield process_post(post)\n\n\ndef process_post(post, newsroom = False):\n del post['comments']\n post['_id'] = post['slug']\n # remove fields we're not interested in\n if post['type'] == 'cfpb_newsroom':\n post['category'] = [cat['title'].replace('&', '&') for cat in post['taxonomy_cfpb_newsroom_cat_taxonomy']]\n elif post['type'] == 'post':\n post['category'] = [cat['title'].replace('&', '&') for cat in post['taxonomy_fj_category']]\n if post['type'] == 'watchroom':\n post['author'] = [post['author']['name']]\n # convert watchroom_data_x into a proper list\n links = []\n for x in xrange(0,10):\n custom_fields = post['custom_fields']\n key = 'watchroom_data_%s_link' % x\n if key in custom_fields:\n links.append(custom_fields[key])\n post['links'] = links\n else:\n post['tags'] = [tag['title'] for tag in post['taxonomy_fj_tag']]\n post['author'] = [author['title'] for author in\n post['taxonomy_fj_author'] if 'Press Release' not in\n post['category']]\n if newsroom and post['type'] == 'post':\n post['category'][0] = \"Blog\"\n author_template = Template(\"$first_name $last_name\")\n dt = dateutil.parser.parse(post['date'])\n dt_string = dt.strftime('%Y-%m-%dT%H:%M:%SZ')\n post['date'] = dt_string\n if 'twtr_text' in post['custom_fields']:\n post['twtr_text'] = post['custom_fields']['twtr_text'][0]\n if 'twtr_lang' in post['custom_fields']:\n post['twtr_lang'] = post['custom_fields']['twtr_lang'][0]\n if 'twtr_rel' in post['custom_fields']:\n post['twtr_rel'] = post['custom_fields']['twtr_rel'][0]\n if 'twtr_hash' in post['custom_fields']:\n post['twtr_hash'] = post['custom_fields']['twtr_hash'][0]\n return post\n", "path": "_lib/wordpress_post_processor.py"}]} | 1,343 | 261 |
gh_patches_debug_22554 | rasdani/github-patches | git_diff | ansible__ansible-modules-core-3313 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ops_template.py: Take care of json src variable
##### ISSUE TYPE
- Bug Report
##### COMPONENT NAME
ops_template.py
##### ANSIBLE VERSION
```
air$ ansible-playbook --version
ansible-playbook 2.1.0
config file = /usr/local/git/github.com/dc-on-docker/ansible.cfg
configured module search path = Default w/o overrides
```
##### CONFIGURATION
As mentioned in [ansible/ansible #15133](https://github.com/ansible/ansible/issues/15133), this shows up when configuring OpenSwitch through the [OpenSwitch switch role](https://github.com/keinohguchi/ops-switch-role).
##### OS / ENVIRONMENT
Reproduced on ArchLinux control machine, just like [ansible/ansible #15133](https://github.com/ansible/ansible/issues/15133).
##### SUMMARY
Call `module.from_json` on the `src` argument in the case of the `dc` or `rest` transport.
##### STEPS TO REPRODUCE
```
---
- name: fabric switches
hosts: fabrics
gather_facts: yes
vars:
ansible_user: admin
ops_system_hostname: "{{ inventory_hostname }}"
ops_debug: no
roles:
- role: switch
ops_intf_admin_state: up
- name: spine switches
hosts: spines
gather_facts: yes
vars:
ansible_user: admin
ops_system_hostname: "{{ inventory_hostname }}"
ops_debug: no
roles:
- role: switch
ops_intf_admin_state: up
- name: leaf switches
hosts: leaves
gather_facts: yes
vars:
ansible_user: admin
ops_system_hostname: "{{ inventory_hostname }}"
ops_debug: no
roles:
- role: switch
ops_intf_admin_state: up
```
##### EXPECTED RESULTS
Configure the OPS switch through both the DC and REST APIs.
##### ACTUAL RESULTS
```
air$ play site.yaml
PLAY [fabric switches] *********************************************************
TASK [setup] *******************************************************************
ok: [fab1]
TASK [switch : print JSON input for this play] *********************************
skipping: [fab1]
TASK [switch : configure the switch] *******************************************
fatal: [fab1]: FAILED! => {"changed": false, "failed": true, "msg": "argument src is of type <type 'str'> and we were unable to convert
to dict"}
```
</issue>
<code>
[start of network/openswitch/ops_template.py]
1 #!/usr/bin/python
2 #
3 # This file is part of Ansible
4 #
5 # Ansible is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # Ansible is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
17 #
18 DOCUMENTATION = """
19 ---
20 module: ops_template
21 version_added: "2.1"
22 author: "Peter Sprygada (@privateip)"
23 short_description: Push configuration to OpenSwitch
24 description:
25 - The OpenSwitch platform provides a library for pushing JSON structured
26 configuration files into the current running-config. This module
27 will read the current configuration from OpenSwitch and compare it
28 against a provided candidate configuration. If there are changes, the
29 candidate configuration is merged with the current configuration and
30 pushed into OpenSwitch
31 extends_documentation_fragment: openswitch
32 options:
33 src:
34 description:
35 - The path to the config source. The source can be either a
36 file with config or a template that will be merged during
37 runtime. By default the task will search for the source
38 file in role or playbook root folder in templates directory.
39 required: true
40 force:
41 description:
42 - The force argument instructs the module to not consider the
43 current devices running-config. When set to true, this will
44 cause the module to push the contents of I(src) into the device
45 without first checking if already configured.
46 required: false
47 default: false
48 choices: ['yes', 'no']
49 backup:
50 description:
51 - When this argument is configured true, the module will backup
52 the running-config from the node prior to making any changes.
53 The backup file will be written to backups/ in
54 the root of the playbook directory.
55 required: false
56 default: false
57 choices: ['yes', 'no']
58 config:
59 description:
60 - The module, by default, will connect to the remote device and
61 retrieve the current running-config to use as a base for comparing
62 against the contents of source. There are times when it is not
63 desirable to have the task get the current running-config for
64 every task in a playbook. The I(config) argument allows the
65 implementer to pass in the configuruation to use as the base
66 config for comparision.
67 required: false
68 default: null
69 """
70
71 EXAMPLES = """
72 - name: set hostname with file lookup
73 ops_template:
74 src: ./hostname.json
75 backup: yes
76 remote_user: admin
77 become: yes
78
79 - name: set hostname with var
80 ops_template:
81 src: "{{ config }}"
82 remote_user: admin
83 become: yes
84 """
85
86 RETURN = """
87 updates:
88 description: The list of configuration updates to be merged
89 retured: always
90 type: dict
91 sample: {obj, obj}
92 responses:
93 desription: returns the responses when configuring using cli
94 returned: when transport == cli
95 type: list
96 sample: [...]
97 """
98 import copy
99
100 def compare(this, other):
101 parents = [item.text for item in this.parents]
102 for entry in other:
103 if this == entry:
104 return None
105 return this
106
107 def expand(obj, queue):
108 block = [item.raw for item in obj.parents]
109 block.append(obj.raw)
110
111 current_level = queue
112 for b in block:
113 if b not in current_level:
114 current_level[b] = collections.OrderedDict()
115 current_level = current_level[b]
116 for c in obj.children:
117 if c.raw not in current_level:
118 current_level[c.raw] = collections.OrderedDict()
119
120 def flatten(data, obj):
121 for k, v in data.items():
122 obj.append(k)
123 flatten(v, obj)
124 return obj
125
126 def get_config(module):
127 config = module.params['config'] or dict()
128 if not config and not module.params['force']:
129 config = module.config
130 return config
131
132 def sort(val):
133 if isinstance(val, (list, set)):
134 return sorted(val)
135 return val
136
137 def diff(this, other, path=None):
138 updates = list()
139 path = path or list()
140 for key, value in this.items():
141 if key not in other:
142 other_value = other.get(key)
143 updates.append((list(path), key, value, other_value))
144 else:
145 if isinstance(this[key], dict):
146 path.append(key)
147 updates.extend(diff(this[key], other[key], list(path)))
148 path.pop()
149 else:
150 other_value = other.get(key)
151 if sort(this[key]) != sort(other_value):
152 updates.append((list(path), key, value, other_value))
153 return updates
154
155 def merge(changeset, config=None):
156 config = config or dict()
157 for path, key, value, _ in changeset:
158 current_level = config
159 for part in path:
160 if part not in current_level:
161 current_level[part] = dict()
162 current_level = current_level[part]
163 current_level[key] = value
164 return config
165
166 def main():
167 """ main entry point for module execution
168 """
169
170 argument_spec = dict(
171 src=dict(type='dict'),
172 force=dict(default=False, type='bool'),
173 backup=dict(default=False, type='bool'),
174 config=dict(type='dict'),
175 )
176
177 mutually_exclusive = [('config', 'backup'), ('config', 'force')]
178
179 module = get_module(argument_spec=argument_spec,
180 mutually_exclusive=mutually_exclusive,
181 supports_check_mode=True)
182
183 src = module.params['src']
184
185 result = dict(changed=False)
186
187 contents = get_config(module)
188 result['_backup'] = copy.deepcopy(module.config)
189
190 if module.params['transport'] in ['ssh', 'rest']:
191 config = contents
192
193 changeset = diff(src, config)
194 candidate = merge(changeset, config)
195
196 updates = dict()
197 for path, key, new_value, old_value in changeset:
198 path = '%s.%s' % ('.'.join(path), key)
199 updates[path] = new_value
200 result['updates'] = updates
201
202 if changeset:
203 if not module.check_mode:
204 module.configure(config)
205 result['changed'] = True
206
207 else:
208 config = module.parse_config(config)
209 candidate = module.parse_config(module.params['src'])
210
211 commands = collections.OrderedDict()
212 toplevel = [c.text for c in config]
213
214 for line in candidate:
215 if line.text in ['!', '']:
216 continue
217
218 if not line.parents:
219 if line.text not in toplevel:
220 expand(line, commands)
221 else:
222 item = compare(line, config)
223 if item:
224 expand(item, commands)
225
226 commands = flatten(commands, list())
227
228 if commands:
229 if not module.check_mode:
230 commands = [str(c).strip() for c in commands]
231 response = module.configure(commands)
232 result['responses'] = response
233 result['changed'] = True
234 result['updates'] = commands
235
236 module.exit_json(**result)
237
238 from ansible.module_utils.basic import *
239 from ansible.module_utils.urls import *
240 from ansible.module_utils.netcfg import *
241 from ansible.module_utils.shell import *
242 from ansible.module_utils.openswitch import *
243 if __name__ == '__main__':
244 main()
245
246
247
248
[end of network/openswitch/ops_template.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/network/openswitch/ops_template.py b/network/openswitch/ops_template.py
--- a/network/openswitch/ops_template.py
+++ b/network/openswitch/ops_template.py
@@ -168,7 +168,7 @@
"""
argument_spec = dict(
- src=dict(type='dict'),
+ src=dict(type='str'),
force=dict(default=False, type='bool'),
backup=dict(default=False, type='bool'),
config=dict(type='dict'),
@@ -180,8 +180,6 @@
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
- src = module.params['src']
-
result = dict(changed=False)
contents = get_config(module)
@@ -189,6 +187,7 @@
if module.params['transport'] in ['ssh', 'rest']:
config = contents
+ src = module.from_json(module.params['src'])
changeset = diff(src, config)
candidate = merge(changeset, config)
| {"golden_diff": "diff --git a/network/openswitch/ops_template.py b/network/openswitch/ops_template.py\n--- a/network/openswitch/ops_template.py\n+++ b/network/openswitch/ops_template.py\n@@ -168,7 +168,7 @@\n \"\"\"\n \n argument_spec = dict(\n- src=dict(type='dict'),\n+ src=dict(type='str'),\n force=dict(default=False, type='bool'),\n backup=dict(default=False, type='bool'),\n config=dict(type='dict'),\n@@ -180,8 +180,6 @@\n mutually_exclusive=mutually_exclusive,\n supports_check_mode=True)\n \n- src = module.params['src']\n-\n result = dict(changed=False)\n \n contents = get_config(module)\n@@ -189,6 +187,7 @@\n \n if module.params['transport'] in ['ssh', 'rest']:\n config = contents\n+ src = module.from_json(module.params['src'])\n \n changeset = diff(src, config)\n candidate = merge(changeset, config)\n", "issue": "ops_template.py: Take care of json src variable\n##### ISSUE TYPE\n- Bug Report\n##### COMPONENT NAME\n\nops_template.py\n##### ANSIBLE VERSION\n\n```\nair$ ansible-playbook --version\nansible-playbook 2.1.0\n config file = /usr/local/git/github.com/dc-on-docker/ansible.cfg\n configured module search path = Default w/o overrides\n```\n##### CONFIGURATION\n\nAs mentioned in [ansible/ansible #15133](https://github.com/ansible/ansible/issues/15133), configuring OpenSwitch through the [OpenSwitch switch role](https://github.com/keinohguchi/ops-switch-role).\n##### OS / ENVIRONMENT\n\nReproduced on ArchLinux control machine, just like [ansible/ansible #15133](https://github.com/ansible/ansible/issues/15133).\n##### SUMMARY\n\nCall `module.from_json` in case of `dc` or `rest` transport case.\n##### STEPS TO REPRODUCE\n\n```\n\n---\n- name: fabric switches\n hosts: fabrics\n gather_facts: yes\n vars:\n ansible_user: admin\n ops_system_hostname: \"{{ inventory_hostname }}\"\n ops_debug: no\n roles:\n - role: switch\n ops_intf_admin_state: up\n\n- name: spine switches\n hosts: spines\n gather_facts: yes\n vars:\n ansible_user: admin\n ops_system_hostname: \"{{ inventory_hostname }}\"\n ops_debug: no\n roles:\n - role: switch\n ops_intf_admin_state: up\n\n- name: leaf switches\n hosts: leaves\n gather_facts: yes\n vars:\n ansible_user: admin\n ops_system_hostname: \"{{ inventory_hostname }}\"\n ops_debug: no\n roles:\n - role: switch\n ops_intf_admin_state: up\n```\n##### EXPECTED RESULTS\n\nConfigure the OPS switch both through DC or REST APIs.\n##### ACTUAL RESULTS\n\n```\nair$ play site.yaml\n\nPLAY [fabric switches] *********************************************************\n\nTASK [setup] *******************************************************************\nok: [fab1]\n\nTASK [switch : print JSON input for this play] *********************************\nskipping: [fab1]\n\nTASK [switch : configure the switch] *******************************************\nfatal: [fab1]: FAILED! => {\"changed\": false, \"failed\": true, \"msg\": \"argument src is of type <type 'str'> and we were unable to convert\nto dict\"}\n```\n\n", "before_files": [{"content": "#!/usr/bin/python\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n#\nDOCUMENTATION = \"\"\"\n---\nmodule: ops_template\nversion_added: \"2.1\"\nauthor: \"Peter Sprygada (@privateip)\"\nshort_description: Push configuration to OpenSwitch\ndescription:\n - The OpenSwitch platform provides a library for pushing JSON structured\n configuration files into the current running-config. This module\n will read the current configuration from OpenSwitch and compare it\n against a provided candidate configuration. If there are changes, the\n candidate configuration is merged with the current configuration and\n pushed into OpenSwitch\nextends_documentation_fragment: openswitch\noptions:\n src:\n description:\n - The path to the config source. The source can be either a\n file with config or a template that will be merged during\n runtime. By default the task will search for the source\n file in role or playbook root folder in templates directory.\n required: true\n force:\n description:\n - The force argument instructs the module to not consider the\n current devices running-config. When set to true, this will\n cause the module to push the contents of I(src) into the device\n without first checking if already configured.\n required: false\n default: false\n choices: ['yes', 'no']\n backup:\n description:\n - When this argument is configured true, the module will backup\n the running-config from the node prior to making any changes.\n The backup file will be written to backups/ in\n the root of the playbook directory.\n required: false\n default: false\n choices: ['yes', 'no']\n config:\n description:\n - The module, by default, will connect to the remote device and\n retrieve the current running-config to use as a base for comparing\n against the contents of source. There are times when it is not\n desirable to have the task get the current running-config for\n every task in a playbook. 
The I(config) argument allows the\n implementer to pass in the configuruation to use as the base\n config for comparision.\n required: false\n default: null\n\"\"\"\n\nEXAMPLES = \"\"\"\n- name: set hostname with file lookup\n ops_template:\n src: ./hostname.json\n backup: yes\n remote_user: admin\n become: yes\n\n- name: set hostname with var\n ops_template:\n src: \"{{ config }}\"\n remote_user: admin\n become: yes\n\"\"\"\n\nRETURN = \"\"\"\nupdates:\n description: The list of configuration updates to be merged\n retured: always\n type: dict\n sample: {obj, obj}\nresponses:\n desription: returns the responses when configuring using cli\n returned: when transport == cli\n type: list\n sample: [...]\n\"\"\"\nimport copy\n\ndef compare(this, other):\n parents = [item.text for item in this.parents]\n for entry in other:\n if this == entry:\n return None\n return this\n\ndef expand(obj, queue):\n block = [item.raw for item in obj.parents]\n block.append(obj.raw)\n\n current_level = queue\n for b in block:\n if b not in current_level:\n current_level[b] = collections.OrderedDict()\n current_level = current_level[b]\n for c in obj.children:\n if c.raw not in current_level:\n current_level[c.raw] = collections.OrderedDict()\n\ndef flatten(data, obj):\n for k, v in data.items():\n obj.append(k)\n flatten(v, obj)\n return obj\n\ndef get_config(module):\n config = module.params['config'] or dict()\n if not config and not module.params['force']:\n config = module.config\n return config\n\ndef sort(val):\n if isinstance(val, (list, set)):\n return sorted(val)\n return val\n\ndef diff(this, other, path=None):\n updates = list()\n path = path or list()\n for key, value in this.items():\n if key not in other:\n other_value = other.get(key)\n updates.append((list(path), key, value, other_value))\n else:\n if isinstance(this[key], dict):\n path.append(key)\n updates.extend(diff(this[key], other[key], list(path)))\n path.pop()\n else:\n other_value = other.get(key)\n if sort(this[key]) != sort(other_value):\n updates.append((list(path), key, value, other_value))\n return updates\n\ndef merge(changeset, config=None):\n config = config or dict()\n for path, key, value, _ in changeset:\n current_level = config\n for part in path:\n if part not in current_level:\n current_level[part] = dict()\n current_level = current_level[part]\n current_level[key] = value\n return config\n\ndef main():\n \"\"\" main entry point for module execution\n \"\"\"\n\n argument_spec = dict(\n src=dict(type='dict'),\n force=dict(default=False, type='bool'),\n backup=dict(default=False, type='bool'),\n config=dict(type='dict'),\n )\n\n mutually_exclusive = [('config', 'backup'), ('config', 'force')]\n\n module = get_module(argument_spec=argument_spec,\n mutually_exclusive=mutually_exclusive,\n supports_check_mode=True)\n\n src = module.params['src']\n\n result = dict(changed=False)\n\n contents = get_config(module)\n result['_backup'] = copy.deepcopy(module.config)\n\n if module.params['transport'] in ['ssh', 'rest']:\n config = contents\n\n changeset = diff(src, config)\n candidate = merge(changeset, config)\n\n updates = dict()\n for path, key, new_value, old_value in changeset:\n path = '%s.%s' % ('.'.join(path), key)\n updates[path] = new_value\n result['updates'] = updates\n\n if changeset:\n if not module.check_mode:\n module.configure(config)\n result['changed'] = True\n\n else:\n config = module.parse_config(config)\n candidate = module.parse_config(module.params['src'])\n\n commands = collections.OrderedDict()\n toplevel = 
[c.text for c in config]\n\n for line in candidate:\n if line.text in ['!', '']:\n continue\n\n if not line.parents:\n if line.text not in toplevel:\n expand(line, commands)\n else:\n item = compare(line, config)\n if item:\n expand(item, commands)\n\n commands = flatten(commands, list())\n\n if commands:\n if not module.check_mode:\n commands = [str(c).strip() for c in commands]\n response = module.configure(commands)\n result['responses'] = response\n result['changed'] = True\n result['updates'] = commands\n\n module.exit_json(**result)\n\nfrom ansible.module_utils.basic import *\nfrom ansible.module_utils.urls import *\nfrom ansible.module_utils.netcfg import *\nfrom ansible.module_utils.shell import *\nfrom ansible.module_utils.openswitch import *\nif __name__ == '__main__':\n main()\n\n\n\n", "path": "network/openswitch/ops_template.py"}]} | 3,385 | 232 |
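Stripped of the Ansible harness, the diff in the row above amounts to "accept `src` as a string and JSON-decode it before diffing". A stand-alone sketch of that step; `json.loads` plays the role of `module.from_json` here, and the sample documents are made up for illustration.

```python
# Stand-alone illustration of the fix: when the candidate config arrives as
# a JSON string, decode it before comparing it against the running config.
import json

src = '{"System": {"hostname": "fab1"}}'      # what the playbook passes in
running = {"System": {"hostname": "switch"}}  # what the device reports

candidate = json.loads(src)                   # the previously missing step
changed = candidate != running
print(changed)  # True -> merge and push the candidate
```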
gh_patches_debug_15794 | rasdani/github-patches | git_diff | systemd__mkosi-2611 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
/var/lib/dpkg/available does not exist and causes errors when installing packages using apt in image
### mkosi commit the issue has been seen with
v22
### Used host distribution
Debian 12 (backports enabled)
### Used target distribution
Ubuntu 20.04
### Linux kernel version used
6.1.0-18-amd64
### CPU architectures issue was seen on
x86_64
### Unexpected behaviour you saw
mkosi does not create /var/lib/dpkg/available, which is used by the old tool dselect. This causes errors when using piuparts to test Debian packages; the error occurs when piuparts installs the package to be tested into a chroot environment.
We have already found a workaround by running the following command in a prepare script:
```bash
dpkg --update-avail
```
I'm wondering if this command should be part of the mkosi build process.
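A minimal sketch of what that workaround could look like as a Python prepare script (mkosi scripts can be any executable). The BUILDROOT variable and the `--root` flag usage are assumptions for illustration, not taken from the report above:

```python
#!/usr/bin/env python3
# Hypothetical prepare script running the dpkg --update-avail workaround against
# the image root. BUILDROOT is assumed to be exported by mkosi for scripts; the
# fallback path is only a placeholder.
import os
import subprocess

buildroot = os.environ.get("BUILDROOT", "/buildroot")
subprocess.run(["dpkg", "--root", buildroot, "--update-avail"], check=True)
```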
### Used mkosi config
```ini
[Output]
Format=tar
CompressOutput=false
[Host]
QemuSwtpm=False
# setup qemu to exit on kernel panic
# https://unix.stackexchange.com/questions/443017/can-i-make-qemu-exit-with-failure-on-kernel-panic
QemuArgs=-no-reboot
KernelCommandLineExtra=panic=-1
[Content]
Bootable=false
WithDocs=false
PackageDirectories=../extra_packages/
[Packages]
WithNetwork=true
[Distribution]
Architecture=x86-64
Distribution=ubuntu
Release=focal
Repositories=main,universe
@Mirror=http://my-package-mirror.local/ubuntu
[Content]
Packages=apt
apt-transport-https
sudo
ca-certificates
ca-custom
gpg
SkeletonTrees=../../../../data/rootfs
```
### mkosi output
```sh
piuparts output:
Preconfiguring packages ...
dpkg: error: failed to open package info file '/var/lib/dpkg/available' for reading: No such file or directory
Fetched 497 MB in 11s (44.2 MB/s)
E: Sub-process dpkg --set-selections returned an error code (2)
E: Couldn't record the approved state changes as dpkg selection states
```
</issue>
<code>
[start of mkosi/installer/apt.py]
1 # SPDX-License-Identifier: LGPL-2.1+
2 import os
3 import textwrap
4 from collections.abc import Iterable, Sequence
5 from pathlib import Path
6 from typing import NamedTuple, Optional
7
8 from mkosi.config import Config, ConfigFeature
9 from mkosi.context import Context
10 from mkosi.installer import PackageManager
11 from mkosi.log import die
12 from mkosi.mounts import finalize_source_mounts
13 from mkosi.run import find_binary, run
14 from mkosi.sandbox import Mount, apivfs_cmd
15 from mkosi.types import _FILE, CompletedProcess, PathString
16 from mkosi.util import umask
17
18
19 class Apt(PackageManager):
20 class Repository(NamedTuple):
21 types: tuple[str, ...]
22 url: str
23 suite: str
24 components: tuple[str, ...]
25 signedby: Optional[Path]
26
27 def __str__(self) -> str:
28 return textwrap.dedent(
29 f"""\
30 Types: {" ".join(self.types)}
31 URIs: {self.url}
32 Suites: {self.suite}
33 Components: {" ".join(self.components)}
34 {"Signed-By" if self.signedby else "Trusted"}: {self.signedby or "yes"}
35
36 """
37 )
38
39 @classmethod
40 def executable(cls, config: Config) -> str:
41 return "apt"
42
43 @classmethod
44 def subdir(cls, config: Config) -> Path:
45 return Path("apt")
46
47 @classmethod
48 def cache_subdirs(cls, cache: Path) -> list[Path]:
49 return [cache / "archives"]
50
51 @classmethod
52 def scripts(cls, context: Context) -> dict[str, list[PathString]]:
53 return {
54 **{
55 command: apivfs_cmd() + cls.env_cmd(context) + cls.cmd(context, command) for command in (
56 "apt",
57 "apt-cache",
58 "apt-cdrom",
59 "apt-config",
60 "apt-extracttemplates",
61 "apt-get",
62 "apt-key",
63 "apt-mark",
64 "apt-sortpkgs",
65 )
66 },
67 "mkosi-install" : ["apt-get", "install"],
68 "mkosi-upgrade" : ["apt-get", "upgrade"],
69 "mkosi-remove" : ["apt-get", "purge"],
70 "mkosi-reinstall": ["apt-get", "install", "--reinstall"],
71 }
72
73 @classmethod
74 def setup(cls, context: Context, repos: Iterable[Repository]) -> None:
75 (context.pkgmngr / "etc/apt").mkdir(exist_ok=True, parents=True)
76 (context.pkgmngr / "etc/apt/apt.conf.d").mkdir(exist_ok=True, parents=True)
77 (context.pkgmngr / "etc/apt/preferences.d").mkdir(exist_ok=True, parents=True)
78 (context.pkgmngr / "etc/apt/sources.list.d").mkdir(exist_ok=True, parents=True)
79
80 with umask(~0o755):
81 # TODO: Drop once apt 2.5.4 is widely available.
82 (context.root / "var/lib/dpkg").mkdir(parents=True, exist_ok=True)
83 (context.root / "var/lib/dpkg/status").touch()
84
85 # We have a special apt.conf outside of pkgmngr dir that only configures "Dir::Etc" that we pass to APT_CONFIG
86 # to tell apt it should read config files from /etc/apt in case this is overridden by distributions. This is
87 # required because apt parses CLI configuration options after parsing its configuration files and as such we
88 # can't use CLI options to tell apt where to look for configuration files.
89 config = context.pkgmngr / "etc/apt.conf"
90 if not config.exists():
91 config.write_text(
92 textwrap.dedent(
93 """\
94 Dir::Etc "/etc/apt";
95 """
96 )
97 )
98
99 sources = context.pkgmngr / "etc/apt/sources.list.d/mkosi.sources"
100 if not sources.exists():
101 for repo in repos:
102 if repo.signedby and not repo.signedby.exists():
103 die(
104 f"Keyring for repo {repo.url} not found at {repo.signedby}",
105 hint="Make sure the right keyring package (e.g. debian-archive-keyring or ubuntu-keyring) is "
106 "installed",
107 )
108
109 with sources.open("w") as f:
110 for repo in repos:
111 f.write(str(repo))
112
113 @classmethod
114 def finalize_environment(cls, context: Context) -> dict[str, str]:
115 env = {
116 "APT_CONFIG": "/etc/apt.conf",
117 "DEBIAN_FRONTEND" : "noninteractive",
118 "DEBCONF_INTERACTIVE_SEEN": "true",
119 }
120
121 if "INITRD" not in context.config.environment and context.config.bootable != ConfigFeature.disabled:
122 env["INITRD"] = "No"
123
124 return super().finalize_environment(context) | env
125
126 @classmethod
127 def cmd(cls, context: Context, command: str) -> list[PathString]:
128 debarch = context.config.distribution.architecture(context.config.architecture)
129
130 cmdline: list[PathString] = [
131 command,
132 "-o", f"APT::Architecture={debarch}",
133 "-o", f"APT::Architectures={debarch}",
134 "-o", f"APT::Install-Recommends={str(context.config.with_recommends).lower()}",
135 "-o", "APT::Immediate-Configure=off",
136 "-o", "APT::Get::Assume-Yes=true",
137 "-o", "APT::Get::AutomaticRemove=true",
138 "-o", "APT::Get::Allow-Change-Held-Packages=true",
139 "-o", "APT::Get::Allow-Remove-Essential=true",
140 "-o", "APT::Sandbox::User=root",
141 "-o", "Dir::Cache=/var/cache/apt",
142 "-o", "Dir::State=/var/lib/apt",
143 "-o", "Dir::Log=/var/log/apt",
144 "-o", "Dir::State::Status=/buildroot/var/lib/dpkg/status",
145 "-o", f"Dir::Bin::DPkg={find_binary('dpkg', root=context.config.tools())}",
146 "-o", "Debug::NoLocking=true",
147 "-o", "DPkg::Options::=--root=/buildroot",
148 "-o", "DPkg::Options::=--force-unsafe-io",
149 "-o", "DPkg::Options::=--force-architecture",
150 "-o", "DPkg::Options::=--force-depends",
151 "-o", "DPkg::Options::=--no-debsig",
152 "-o", "DPkg::Use-Pty=false",
153 "-o", "DPkg::Install::Recursive::Minimum=1000",
154 "-o", "pkgCacheGen::ForceEssential=,",
155 ]
156
157 if not context.config.repository_key_check:
158 cmdline += [
159 "-o", "Acquire::AllowInsecureRepositories=true",
160 "-o", "Acquire::AllowDowngradeToInsecureRepositories=true",
161 "-o", "APT::Get::AllowUnauthenticated=true",
162 ]
163
164 if not context.config.with_docs:
165 cmdline += [
166 "-o", "DPkg::Options::=--path-exclude=/usr/share/doc/*",
167 "-o", "DPkg::Options::=--path-include=/usr/share/doc/*/copyright",
168 "-o", "DPkg::Options::=--path-exclude=/usr/share/man/*",
169 "-o", "DPkg::Options::=--path-exclude=/usr/share/groff/*",
170 "-o", "DPkg::Options::=--path-exclude=/usr/share/info/*",
171 ]
172
173 if context.config.proxy_url:
174 cmdline += [
175 "-o", f"Acquire::http::Proxy={context.config.proxy_url}",
176 "-o", f"Acquire::https::Proxy={context.config.proxy_url}",
177 ]
178
179 return cmdline
180
181 @classmethod
182 def invoke(
183 cls,
184 context: Context,
185 operation: str,
186 arguments: Sequence[str] = (),
187 *,
188 apivfs: bool = False,
189 mounts: Sequence[Mount] = (),
190 stdout: _FILE = None,
191 ) -> CompletedProcess:
192 with finalize_source_mounts(
193 context.config,
194 ephemeral=os.getuid() == 0 and context.config.build_sources_ephemeral,
195 ) as sources:
196 return run(
197 cls.cmd(context, "apt-get") + [operation, *arguments],
198 sandbox=(
199 context.sandbox(
200 network=True,
201 mounts=[Mount(context.root, "/buildroot"), *cls.mounts(context), *sources, *mounts],
202 options=["--dir", "/work/src", "--chdir", "/work/src"],
203 ) + (apivfs_cmd() if apivfs else [])
204 ),
205 env=context.config.environment | cls.finalize_environment(context),
206 stdout=stdout,
207 )
208
209 @classmethod
210 def sync(cls, context: Context) -> None:
211 cls.invoke(context, "update")
212
213 @classmethod
214 def createrepo(cls, context: Context) -> None:
215 with (context.packages / "Packages").open("wb") as f:
216 run(
217 ["dpkg-scanpackages", "."],
218 stdout=f,
219 sandbox=context.sandbox(
220 mounts=[Mount(context.packages, context.packages, ro=True)],
221 options=["--chdir", context.packages],
222 ),
223 )
224
225 (context.pkgmngr / "etc/apt/sources.list.d").mkdir(parents=True, exist_ok=True)
226 (context.pkgmngr / "etc/apt/sources.list.d/mkosi-local.sources").write_text(
227 textwrap.dedent(
228 """\
229 Enabled: yes
230 Types: deb
231 URIs: file:///work/packages
232 Suites: ./
233 Trusted: yes
234 """
235 )
236 )
237
238 cls.invoke(
239 context,
240 "update",
241 arguments=[
242 "-o", "Dir::Etc::sourcelist=sources.list.d/mkosi-local.sources",
243 "-o", "Dir::Etc::sourceparts=-",
244 "-o", "APT::Get::List-Cleanup=0",
245 ],
246 )
247
[end of mkosi/installer/apt.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mkosi/installer/apt.py b/mkosi/installer/apt.py
--- a/mkosi/installer/apt.py
+++ b/mkosi/installer/apt.py
@@ -82,6 +82,8 @@
(context.root / "var/lib/dpkg").mkdir(parents=True, exist_ok=True)
(context.root / "var/lib/dpkg/status").touch()
+ (context.root / "var/lib/dpkg/available").touch()
+
# We have a special apt.conf outside of pkgmngr dir that only configures "Dir::Etc" that we pass to APT_CONFIG
# to tell apt it should read config files from /etc/apt in case this is overridden by distributions. This is
# required because apt parses CLI configuration options after parsing its configuration files and as such we
| {"golden_diff": "diff --git a/mkosi/installer/apt.py b/mkosi/installer/apt.py\n--- a/mkosi/installer/apt.py\n+++ b/mkosi/installer/apt.py\n@@ -82,6 +82,8 @@\n (context.root / \"var/lib/dpkg\").mkdir(parents=True, exist_ok=True)\n (context.root / \"var/lib/dpkg/status\").touch()\n \n+ (context.root / \"var/lib/dpkg/available\").touch()\n+\n # We have a special apt.conf outside of pkgmngr dir that only configures \"Dir::Etc\" that we pass to APT_CONFIG\n # to tell apt it should read config files from /etc/apt in case this is overridden by distributions. This is\n # required because apt parses CLI configuration options after parsing its configuration files and as such we\n", "issue": "/var/lib/dpkg/available does not exist and causes errors when installing packages using apt in image\n### mkosi commit the issue has been seen with\n\nv22\n\n### Used host distribution\n\nDebian 12 (backports enabled)\n\n### Used target distribution\n\nUbuntu 20.04\n\n### Linux kernel version used\n\n6.1.0-18-amd64\n\n### CPU architectures issue was seen on\n\nx86_64\n\n### Unexpected behaviour you saw\n\nmkosi does not create /var/lib/dpkg/available which is used by the old tool dselect. This caused errors when using piuparts to test Debian packages. The error occurs when piuparts installs the package to be tested into a chroot environment.\r\n\r\nWe have already found a workaround by running the following command in a prepare script:\r\n```bash\r\ndpkg --update-avail\r\n```\r\n\r\nI'm wondering if this command should be part of the mkosi build process.\n\n### Used mkosi config\n\n```ini\n[Output]\r\nFormat=tar\r\nCompressOutput=false\r\n\r\n[Host]\r\nQemuSwtpm=False\r\n# setup qemu to exit on kernel panic\r\n# https://unix.stackexchange.com/questions/443017/can-i-make-qemu-exit-with-failure-on-kernel-panic\r\nQemuArgs=-no-reboot\r\nKernelCommandLineExtra=panic=-1\r\n\r\n[Content]\r\nBootable=false\r\nWithDocs=false\r\nPackageDirectories=../extra_packages/\r\n\r\n[Packages]\r\nWithNetwork=true\r\n\r\n[Distribution]\r\nArchitecture=x86-64\r\nDistribution=ubuntu\r\nRelease=focal\r\nRepositories=main,universe\r\n\r\n@Mirror=http://my-package-mirror.local/ubuntu\r\n\r\n[Content]\r\nPackages=apt\r\n apt-transport-https\r\n sudo\r\n ca-certificates\r\n ca-custom\r\n gpg\r\nSkeletonTrees=../../../../data/rootfs\n```\n\n\n### mkosi output\n\n```sh\npiuparts output:\r\n\r\nPreconfiguring packages ...\r\n dpkg: error: failed to open package info file '/var/lib/dpkg/available' for reading: No such file or directory\r\n Fetched 497 MB in 11s (44.2 MB/s)\r\n E: Sub-process dpkg --set-selections returned an error code (2)\r\n E: Couldn't record the approved state changes as dpkg selection states\n```\n\n", "before_files": [{"content": "# SPDX-License-Identifier: LGPL-2.1+\nimport os\nimport textwrap\nfrom collections.abc import Iterable, Sequence\nfrom pathlib import Path\nfrom typing import NamedTuple, Optional\n\nfrom mkosi.config import Config, ConfigFeature\nfrom mkosi.context import Context\nfrom mkosi.installer import PackageManager\nfrom mkosi.log import die\nfrom mkosi.mounts import finalize_source_mounts\nfrom mkosi.run import find_binary, run\nfrom mkosi.sandbox import Mount, apivfs_cmd\nfrom mkosi.types import _FILE, CompletedProcess, PathString\nfrom mkosi.util import umask\n\n\nclass Apt(PackageManager):\n class Repository(NamedTuple):\n types: tuple[str, ...]\n url: str\n suite: str\n components: tuple[str, ...]\n signedby: Optional[Path]\n\n def __str__(self) -> str:\n return textwrap.dedent(\n f\"\"\"\\\n 
Types: {\" \".join(self.types)}\n URIs: {self.url}\n Suites: {self.suite}\n Components: {\" \".join(self.components)}\n {\"Signed-By\" if self.signedby else \"Trusted\"}: {self.signedby or \"yes\"}\n\n \"\"\"\n )\n\n @classmethod\n def executable(cls, config: Config) -> str:\n return \"apt\"\n\n @classmethod\n def subdir(cls, config: Config) -> Path:\n return Path(\"apt\")\n\n @classmethod\n def cache_subdirs(cls, cache: Path) -> list[Path]:\n return [cache / \"archives\"]\n\n @classmethod\n def scripts(cls, context: Context) -> dict[str, list[PathString]]:\n return {\n **{\n command: apivfs_cmd() + cls.env_cmd(context) + cls.cmd(context, command) for command in (\n \"apt\",\n \"apt-cache\",\n \"apt-cdrom\",\n \"apt-config\",\n \"apt-extracttemplates\",\n \"apt-get\",\n \"apt-key\",\n \"apt-mark\",\n \"apt-sortpkgs\",\n )\n },\n \"mkosi-install\" : [\"apt-get\", \"install\"],\n \"mkosi-upgrade\" : [\"apt-get\", \"upgrade\"],\n \"mkosi-remove\" : [\"apt-get\", \"purge\"],\n \"mkosi-reinstall\": [\"apt-get\", \"install\", \"--reinstall\"],\n }\n\n @classmethod\n def setup(cls, context: Context, repos: Iterable[Repository]) -> None:\n (context.pkgmngr / \"etc/apt\").mkdir(exist_ok=True, parents=True)\n (context.pkgmngr / \"etc/apt/apt.conf.d\").mkdir(exist_ok=True, parents=True)\n (context.pkgmngr / \"etc/apt/preferences.d\").mkdir(exist_ok=True, parents=True)\n (context.pkgmngr / \"etc/apt/sources.list.d\").mkdir(exist_ok=True, parents=True)\n\n with umask(~0o755):\n # TODO: Drop once apt 2.5.4 is widely available.\n (context.root / \"var/lib/dpkg\").mkdir(parents=True, exist_ok=True)\n (context.root / \"var/lib/dpkg/status\").touch()\n\n # We have a special apt.conf outside of pkgmngr dir that only configures \"Dir::Etc\" that we pass to APT_CONFIG\n # to tell apt it should read config files from /etc/apt in case this is overridden by distributions. This is\n # required because apt parses CLI configuration options after parsing its configuration files and as such we\n # can't use CLI options to tell apt where to look for configuration files.\n config = context.pkgmngr / \"etc/apt.conf\"\n if not config.exists():\n config.write_text(\n textwrap.dedent(\n \"\"\"\\\n Dir::Etc \"/etc/apt\";\n \"\"\"\n )\n )\n\n sources = context.pkgmngr / \"etc/apt/sources.list.d/mkosi.sources\"\n if not sources.exists():\n for repo in repos:\n if repo.signedby and not repo.signedby.exists():\n die(\n f\"Keyring for repo {repo.url} not found at {repo.signedby}\",\n hint=\"Make sure the right keyring package (e.g. 
debian-archive-keyring or ubuntu-keyring) is \"\n \"installed\",\n )\n\n with sources.open(\"w\") as f:\n for repo in repos:\n f.write(str(repo))\n\n @classmethod\n def finalize_environment(cls, context: Context) -> dict[str, str]:\n env = {\n \"APT_CONFIG\": \"/etc/apt.conf\",\n \"DEBIAN_FRONTEND\" : \"noninteractive\",\n \"DEBCONF_INTERACTIVE_SEEN\": \"true\",\n }\n\n if \"INITRD\" not in context.config.environment and context.config.bootable != ConfigFeature.disabled:\n env[\"INITRD\"] = \"No\"\n\n return super().finalize_environment(context) | env\n\n @classmethod\n def cmd(cls, context: Context, command: str) -> list[PathString]:\n debarch = context.config.distribution.architecture(context.config.architecture)\n\n cmdline: list[PathString] = [\n command,\n \"-o\", f\"APT::Architecture={debarch}\",\n \"-o\", f\"APT::Architectures={debarch}\",\n \"-o\", f\"APT::Install-Recommends={str(context.config.with_recommends).lower()}\",\n \"-o\", \"APT::Immediate-Configure=off\",\n \"-o\", \"APT::Get::Assume-Yes=true\",\n \"-o\", \"APT::Get::AutomaticRemove=true\",\n \"-o\", \"APT::Get::Allow-Change-Held-Packages=true\",\n \"-o\", \"APT::Get::Allow-Remove-Essential=true\",\n \"-o\", \"APT::Sandbox::User=root\",\n \"-o\", \"Dir::Cache=/var/cache/apt\",\n \"-o\", \"Dir::State=/var/lib/apt\",\n \"-o\", \"Dir::Log=/var/log/apt\",\n \"-o\", \"Dir::State::Status=/buildroot/var/lib/dpkg/status\",\n \"-o\", f\"Dir::Bin::DPkg={find_binary('dpkg', root=context.config.tools())}\",\n \"-o\", \"Debug::NoLocking=true\",\n \"-o\", \"DPkg::Options::=--root=/buildroot\",\n \"-o\", \"DPkg::Options::=--force-unsafe-io\",\n \"-o\", \"DPkg::Options::=--force-architecture\",\n \"-o\", \"DPkg::Options::=--force-depends\",\n \"-o\", \"DPkg::Options::=--no-debsig\",\n \"-o\", \"DPkg::Use-Pty=false\",\n \"-o\", \"DPkg::Install::Recursive::Minimum=1000\",\n \"-o\", \"pkgCacheGen::ForceEssential=,\",\n ]\n\n if not context.config.repository_key_check:\n cmdline += [\n \"-o\", \"Acquire::AllowInsecureRepositories=true\",\n \"-o\", \"Acquire::AllowDowngradeToInsecureRepositories=true\",\n \"-o\", \"APT::Get::AllowUnauthenticated=true\",\n ]\n\n if not context.config.with_docs:\n cmdline += [\n \"-o\", \"DPkg::Options::=--path-exclude=/usr/share/doc/*\",\n \"-o\", \"DPkg::Options::=--path-include=/usr/share/doc/*/copyright\",\n \"-o\", \"DPkg::Options::=--path-exclude=/usr/share/man/*\",\n \"-o\", \"DPkg::Options::=--path-exclude=/usr/share/groff/*\",\n \"-o\", \"DPkg::Options::=--path-exclude=/usr/share/info/*\",\n ]\n\n if context.config.proxy_url:\n cmdline += [\n \"-o\", f\"Acquire::http::Proxy={context.config.proxy_url}\",\n \"-o\", f\"Acquire::https::Proxy={context.config.proxy_url}\",\n ]\n\n return cmdline\n\n @classmethod\n def invoke(\n cls,\n context: Context,\n operation: str,\n arguments: Sequence[str] = (),\n *,\n apivfs: bool = False,\n mounts: Sequence[Mount] = (),\n stdout: _FILE = None,\n ) -> CompletedProcess:\n with finalize_source_mounts(\n context.config,\n ephemeral=os.getuid() == 0 and context.config.build_sources_ephemeral,\n ) as sources:\n return run(\n cls.cmd(context, \"apt-get\") + [operation, *arguments],\n sandbox=(\n context.sandbox(\n network=True,\n mounts=[Mount(context.root, \"/buildroot\"), *cls.mounts(context), *sources, *mounts],\n options=[\"--dir\", \"/work/src\", \"--chdir\", \"/work/src\"],\n ) + (apivfs_cmd() if apivfs else [])\n ),\n env=context.config.environment | cls.finalize_environment(context),\n stdout=stdout,\n )\n\n @classmethod\n def sync(cls, context: Context) -> 
None:\n cls.invoke(context, \"update\")\n\n @classmethod\n def createrepo(cls, context: Context) -> None:\n with (context.packages / \"Packages\").open(\"wb\") as f:\n run(\n [\"dpkg-scanpackages\", \".\"],\n stdout=f,\n sandbox=context.sandbox(\n mounts=[Mount(context.packages, context.packages, ro=True)],\n options=[\"--chdir\", context.packages],\n ),\n )\n\n (context.pkgmngr / \"etc/apt/sources.list.d\").mkdir(parents=True, exist_ok=True)\n (context.pkgmngr / \"etc/apt/sources.list.d/mkosi-local.sources\").write_text(\n textwrap.dedent(\n \"\"\"\\\n Enabled: yes\n Types: deb\n URIs: file:///work/packages\n Suites: ./\n Trusted: yes\n \"\"\"\n )\n )\n\n cls.invoke(\n context,\n \"update\",\n arguments=[\n \"-o\", \"Dir::Etc::sourcelist=sources.list.d/mkosi-local.sources\",\n \"-o\", \"Dir::Etc::sourceparts=-\",\n \"-o\", \"APT::Get::List-Cleanup=0\",\n ],\n )\n", "path": "mkosi/installer/apt.py"}]} | 3,875 | 180 |
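To make the fix in the golden diff above easier to reuse outside mkosi, here is a small standalone sketch of the same dpkg-state bootstrap. The example root path and the umask handling are simplifications; in mkosi itself the two touch() calls live in Apt.setup().

```python
# Standalone sketch of the dpkg database bootstrap performed by the golden diff
# above: a fresh image root needs var/lib/dpkg/{status,available} so that apt,
# dselect and piuparts do not fail. Paths below are example values.
import os
from pathlib import Path

def bootstrap_dpkg_state(root: Path) -> None:
    old_umask = os.umask(0o022)  # roughly what mkosi's umask(~0o755) helper achieves
    try:
        dpkg_dir = root / "var/lib/dpkg"
        dpkg_dir.mkdir(parents=True, exist_ok=True)
        (dpkg_dir / "status").touch(exist_ok=True)
        (dpkg_dir / "available").touch(exist_ok=True)  # the file this issue is about
    finally:
        os.umask(old_umask)

bootstrap_dpkg_state(Path("/tmp/example-root"))
```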
gh_patches_debug_27979 | rasdani/github-patches | git_diff | deepset-ai__haystack-4825 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
GPT-4-32k support broken
**Describe the bug**
Token limit for `gpt-4-32k` gets set to 2049.
**Error message**
```
Traceback (most recent call last):
File "/home/dnetguru/.conda/envs/thunk/lib/python3.10/site-packages/haystack/pipelines/base.py", line 552, in run
node_output, stream_id = self._run_node(node_id, node_input)
File "/home/dnetguru/.conda/envs/thunk/lib/python3.10/site-packages/haystack/pipelines/base.py", line 467, in _run_node
return self.graph.nodes[node_id]["component"]._dispatch_run(**node_input)
File "/home/dnetguru/.conda/envs/thunk/lib/python3.10/site-packages/haystack/nodes/base.py", line 201, in _dispatch_run
return self._dispatch_run_general(self.run, **kwargs)
File "/home/dnetguru/.conda/envs/thunk/lib/python3.10/site-packages/haystack/nodes/base.py", line 245, in _dispatch_run_general
output, stream = run_method(**run_inputs, **run_params)
File "/home/dnetguru/.conda/envs/thunk/lib/python3.10/site-packages/haystack/nodes/prompt/prompt_node.py", line 361, in run
results = self(prompt_collector=prompt_collector, **invocation_context)
File "/home/dnetguru/.conda/envs/thunk/lib/python3.10/site-packages/haystack/nodes/prompt/prompt_node.py", line 145, in __call__
return self.prompt(prompt_template, *args, **kwargs)
File "/home/dnetguru/.conda/envs/thunk/lib/python3.10/site-packages/haystack/nodes/prompt/prompt_node.py", line 171, in prompt
prompt = self.prompt_model._ensure_token_limit(prompt)
File "/home/dnetguru/.conda/envs/thunk/lib/python3.10/site-packages/haystack/nodes/prompt/prompt_model.py", line 134, in _ensure_token_limit
return self.model_invocation_layer._ensure_token_limit(prompt=prompt)
File "/home/dnetguru/.conda/envs/thunk/lib/python3.10/site-packages/haystack/nodes/prompt/invocation_layer/chatgpt.py", line 120, in _ensure_token_limit
raise ValueError(
ValueError: The prompt or the messages are too long (12807 tokens). The length of the prompt or messages and the answer (100 tokens) should be within the max token limit (2049 tokens). Reduce the length of the prompt or messages.
```
**Expected behavior**
`gpt-4-32k` should support up to 32768 tokens.
**To Reproduce**
Pass more than 2049 tokens to a PromptNode using `gpt-4-32k`
</issue>
<code>
[start of haystack/utils/openai_utils.py]
1 """Utils for using OpenAI API"""
2 import os
3 import logging
4 import platform
5 import sys
6 import json
7 from typing import Dict, Union, Tuple, Optional, List
8 import requests
9 import tenacity
10 from transformers import GPT2TokenizerFast
11
12 from haystack.errors import OpenAIError, OpenAIRateLimitError, OpenAIUnauthorizedError
13 from haystack.environment import (
14 HAYSTACK_REMOTE_API_BACKOFF_SEC,
15 HAYSTACK_REMOTE_API_MAX_RETRIES,
16 HAYSTACK_REMOTE_API_TIMEOUT_SEC,
17 )
18
19 logger = logging.getLogger(__name__)
20
21
22 machine = platform.machine().lower()
23 system = platform.system()
24
25
26 OPENAI_TIMEOUT = float(os.environ.get(HAYSTACK_REMOTE_API_TIMEOUT_SEC, 30))
27 OPENAI_BACKOFF = int(os.environ.get(HAYSTACK_REMOTE_API_BACKOFF_SEC, 10))
28 OPENAI_MAX_RETRIES = int(os.environ.get(HAYSTACK_REMOTE_API_MAX_RETRIES, 5))
29
30
31 USE_TIKTOKEN = False
32 if sys.version_info >= (3, 8) and (machine in ["amd64", "x86_64"] or (machine == "arm64" and system == "Darwin")):
33 USE_TIKTOKEN = True
34
35 if USE_TIKTOKEN:
36 import tiktoken # pylint: disable=import-error
37 from tiktoken.model import MODEL_TO_ENCODING
38 else:
39 logger.warning(
40 "OpenAI tiktoken module is not available for Python < 3.8,Linux ARM64 and AARCH64. Falling back to GPT2TokenizerFast."
41 )
42
43
44 def load_openai_tokenizer(tokenizer_name: str):
45 """Load either the tokenizer from tiktoken (if the library is available) or fallback to the GPT2TokenizerFast
46 from the transformers library.
47
48 :param tokenizer_name: The name of the tokenizer to load.
49 """
50 if USE_TIKTOKEN:
51 logger.debug("Using tiktoken %s tokenizer", tokenizer_name)
52 tokenizer = tiktoken.get_encoding(tokenizer_name)
53 else:
54 logger.debug("Using GPT2TokenizerFast tokenizer")
55 tokenizer = GPT2TokenizerFast.from_pretrained(tokenizer_name)
56 return tokenizer
57
58
59 def count_openai_tokens(text: str, tokenizer) -> int:
60 """Count the number of tokens in `text` based on the provided OpenAI `tokenizer`.
61
62 :param text: A string to be tokenized.
63 :param tokenizer: An OpenAI tokenizer.
64 """
65 if USE_TIKTOKEN:
66 return len(tokenizer.encode(text))
67 else:
68 return len(tokenizer.tokenize(text))
69
70
71 def count_openai_tokens_messages(messages: List[Dict[str, str]], tokenizer) -> int:
72 """Count the number of tokens in `messages` based on the OpenAI `tokenizer` provided.
73
74 :param messages: The messages to be tokenized.
75 :param tokenizer: An OpenAI tokenizer.
76 """
77 # adapted from https://platform.openai.com/docs/guides/chat/introduction
78 # should be kept up to date
79 num_tokens = 0
80 for message in messages:
81 num_tokens += 4 # every message follows <im_start>{role/name}\n{content}<im_end>\n
82 for key, value in message.items():
83 if USE_TIKTOKEN:
84 num_tokens += len(tokenizer.encode(value))
85 else:
86 num_tokens += len(tokenizer.tokenize(value))
87 if key == "name": # if there's a name, the role is omitted
88 num_tokens += -1 # role is always required and always 1 token
89 num_tokens += 2 # every reply is primed with <im_start>assistant
90 return num_tokens
91
92
93 def _openai_text_completion_tokenization_details(model_name: str):
94 """Return the tokenizer name and max tokens limit for a given OpenAI `model_name`.
95
96 :param model_name: Name of the OpenAI model.
97 """
98 tokenizer_name = "gpt2"
99 max_tokens_limit = 2049 # Based on this ref: https://platform.openai.com/docs/models/gpt-3
100 model_tokenizer = MODEL_TO_ENCODING.get(model_name) if USE_TIKTOKEN else None
101
102 # covering the lack of support in Tiktoken. https://github.com/openai/tiktoken/pull/72
103 if model_name == "gpt-35-turbo" and USE_TIKTOKEN:
104 model_tokenizer = "cl100k_base"
105
106 if model_tokenizer:
107 # Based on OpenAI models page, 'davinci' considers have 2049 tokens,
108 ## therefore, it is better to add `text-davinci` instead to the condition.
109 ## Ref: https://platform.openai.com/docs/models/gpt-3-5
110 ## https://platform.openai.com/docs/models/gpt-3
111 if "text-davinci" in model_name:
112 max_tokens_limit = 4097
113 tokenizer_name = model_tokenizer
114 elif model_name.startswith("gpt-3"):
115 max_tokens_limit = 4096
116 tokenizer_name = model_tokenizer
117 # Ref: https://platform.openai.com/docs/models/gpt-4
118 elif model_name.startswith("gpt-4-32k"):
119 max_tokens_limit = 32768 # tokens
120 tokenizer_name = model_tokenizer
121 elif model_name.startswith("gpt-4"):
122 max_tokens_limit = 8192 # tokens
123 tokenizer_name = model_tokenizer
124 else:
125 tokenizer_name = model_tokenizer
126
127 return tokenizer_name, max_tokens_limit
128
129
130 @tenacity.retry(
131 reraise=True,
132 retry=tenacity.retry_if_exception_type(OpenAIError)
133 and tenacity.retry_if_not_exception_type(OpenAIUnauthorizedError),
134 wait=tenacity.wait_exponential(multiplier=OPENAI_BACKOFF),
135 stop=tenacity.stop_after_attempt(OPENAI_MAX_RETRIES),
136 )
137 def openai_request(
138 url: str,
139 headers: Dict,
140 payload: Dict,
141 timeout: Union[float, Tuple[float, float]] = OPENAI_TIMEOUT,
142 read_response: Optional[bool] = True,
143 **kwargs,
144 ):
145 """Make a request to the OpenAI API given a `url`, `headers`, `payload`, and `timeout`.
146
147 :param url: The URL of the OpenAI API.
148 :param headers: Dictionary of HTTP Headers to send with the :class:`Request`.
149 :param payload: The payload to send with the request.
150 :param timeout: The timeout length of the request. The default is 30s.
151 :param read_response: Whether to read the response as JSON. The default is True.
152 """
153 response = requests.request("POST", url, headers=headers, data=json.dumps(payload), timeout=timeout, **kwargs)
154 if read_response:
155 json_response = json.loads(response.text)
156
157 if response.status_code != 200:
158 openai_error: OpenAIError
159 if response.status_code == 429:
160 openai_error = OpenAIRateLimitError(f"API rate limit exceeded: {response.text}")
161 elif response.status_code == 401:
162 openai_error = OpenAIUnauthorizedError(f"API key is invalid: {response.text}")
163 else:
164 openai_error = OpenAIError(
165 f"OpenAI returned an error.\n"
166 f"Status code: {response.status_code}\n"
167 f"Response body: {response.text}",
168 status_code=response.status_code,
169 )
170 raise openai_error
171 if read_response:
172 return json_response
173 else:
174 return response
175
176
177 def _check_openai_finish_reason(result: Dict, payload: Dict) -> None:
178 """Check the `finish_reason` the answers returned by OpenAI completions endpoint.
179 If the `finish_reason` is `length` or `content_filter`, log a warning to the user.
180
181 :param result: The result returned from the OpenAI API.
182 :param payload: The payload sent to the OpenAI API.
183 """
184 number_of_truncated_completions = sum(1 for ans in result["choices"] if ans["finish_reason"] == "length")
185 if number_of_truncated_completions > 0:
186 logger.warning(
187 "%s out of the %s completions have been truncated before reaching a natural stopping point. "
188 "Increase the max_tokens parameter to allow for longer completions.",
189 number_of_truncated_completions,
190 payload["n"],
191 )
192
193 number_of_content_filtered_completions = sum(
194 1 for ans in result["choices"] if ans["finish_reason"] == "content_filter"
195 )
196 if number_of_content_filtered_completions > 0:
197 logger.warning(
198 "%s out of the %s completions have omitted content due to a flag from OpenAI content filters.",
199 number_of_truncated_completions,
200 payload["n"],
201 )
202
[end of haystack/utils/openai_utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/haystack/utils/openai_utils.py b/haystack/utils/openai_utils.py
--- a/haystack/utils/openai_utils.py
+++ b/haystack/utils/openai_utils.py
@@ -34,7 +34,7 @@
if USE_TIKTOKEN:
import tiktoken # pylint: disable=import-error
- from tiktoken.model import MODEL_TO_ENCODING
+ from tiktoken.model import MODEL_TO_ENCODING, MODEL_PREFIX_TO_ENCODING
else:
logger.warning(
"OpenAI tiktoken module is not available for Python < 3.8,Linux ARM64 and AARCH64. Falling back to GPT2TokenizerFast."
@@ -97,11 +97,18 @@
"""
tokenizer_name = "gpt2"
max_tokens_limit = 2049 # Based on this ref: https://platform.openai.com/docs/models/gpt-3
- model_tokenizer = MODEL_TO_ENCODING.get(model_name) if USE_TIKTOKEN else None
- # covering the lack of support in Tiktoken. https://github.com/openai/tiktoken/pull/72
- if model_name == "gpt-35-turbo" and USE_TIKTOKEN:
- model_tokenizer = "cl100k_base"
+ if USE_TIKTOKEN:
+ if model_name == "gpt-35-turbo":
+ # covering the lack of support in Tiktoken. https://github.com/openai/tiktoken/pull/72
+ model_tokenizer = "cl100k_base"
+ elif model_name in MODEL_TO_ENCODING:
+ model_tokenizer = MODEL_TO_ENCODING[model_name]
+ else:
+ for model_prefix, tokenizer in MODEL_PREFIX_TO_ENCODING.items():
+ if model_name.startswith(model_prefix):
+ model_tokenizer = tokenizer
+ break
if model_tokenizer:
# Based on OpenAI models page, 'davinci' considers have 2049 tokens,
| {"golden_diff": "diff --git a/haystack/utils/openai_utils.py b/haystack/utils/openai_utils.py\n--- a/haystack/utils/openai_utils.py\n+++ b/haystack/utils/openai_utils.py\n@@ -34,7 +34,7 @@\n \n if USE_TIKTOKEN:\n import tiktoken # pylint: disable=import-error\n- from tiktoken.model import MODEL_TO_ENCODING\n+ from tiktoken.model import MODEL_TO_ENCODING, MODEL_PREFIX_TO_ENCODING\n else:\n logger.warning(\n \"OpenAI tiktoken module is not available for Python < 3.8,Linux ARM64 and AARCH64. Falling back to GPT2TokenizerFast.\"\n@@ -97,11 +97,18 @@\n \"\"\"\n tokenizer_name = \"gpt2\"\n max_tokens_limit = 2049 # Based on this ref: https://platform.openai.com/docs/models/gpt-3\n- model_tokenizer = MODEL_TO_ENCODING.get(model_name) if USE_TIKTOKEN else None\n \n- # covering the lack of support in Tiktoken. https://github.com/openai/tiktoken/pull/72\n- if model_name == \"gpt-35-turbo\" and USE_TIKTOKEN:\n- model_tokenizer = \"cl100k_base\"\n+ if USE_TIKTOKEN:\n+ if model_name == \"gpt-35-turbo\":\n+ # covering the lack of support in Tiktoken. https://github.com/openai/tiktoken/pull/72\n+ model_tokenizer = \"cl100k_base\"\n+ elif model_name in MODEL_TO_ENCODING:\n+ model_tokenizer = MODEL_TO_ENCODING[model_name]\n+ else:\n+ for model_prefix, tokenizer in MODEL_PREFIX_TO_ENCODING.items():\n+ if model_name.startswith(model_prefix):\n+ model_tokenizer = tokenizer\n+ break\n \n if model_tokenizer:\n # Based on OpenAI models page, 'davinci' considers have 2049 tokens,\n", "issue": "GPT-4-32k support broken\n**Describe the bug**\r\nToken limit for `gpt-4-32k` gets set to 2049.\r\n\r\n**Error message**\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/dnetguru/.conda/envs/thunk/lib/python3.10/site-packages/haystack/pipelines/base.py\", line 552, in run\r\n node_output, stream_id = self._run_node(node_id, node_input)\r\n File \"/home/dnetguru/.conda/envs/thunk/lib/python3.10/site-packages/haystack/pipelines/base.py\", line 467, in _run_node\r\n return self.graph.nodes[node_id][\"component\"]._dispatch_run(**node_input)\r\n File \"/home/dnetguru/.conda/envs/thunk/lib/python3.10/site-packages/haystack/nodes/base.py\", line 201, in _dispatch_run\r\n return self._dispatch_run_general(self.run, **kwargs)\r\n File \"/home/dnetguru/.conda/envs/thunk/lib/python3.10/site-packages/haystack/nodes/base.py\", line 245, in _dispatch_run_general\r\n output, stream = run_method(**run_inputs, **run_params)\r\n File \"/home/dnetguru/.conda/envs/thunk/lib/python3.10/site-packages/haystack/nodes/prompt/prompt_node.py\", line 361, in run\r\n results = self(prompt_collector=prompt_collector, **invocation_context)\r\n File \"/home/dnetguru/.conda/envs/thunk/lib/python3.10/site-packages/haystack/nodes/prompt/prompt_node.py\", line 145, in __call__\r\n return self.prompt(prompt_template, *args, **kwargs)\r\n File \"/home/dnetguru/.conda/envs/thunk/lib/python3.10/site-packages/haystack/nodes/prompt/prompt_node.py\", line 171, in prompt\r\n prompt = self.prompt_model._ensure_token_limit(prompt)\r\n File \"/home/dnetguru/.conda/envs/thunk/lib/python3.10/site-packages/haystack/nodes/prompt/prompt_model.py\", line 134, in _ensure_token_limit\r\n return self.model_invocation_layer._ensure_token_limit(prompt=prompt)\r\n File \"/home/dnetguru/.conda/envs/thunk/lib/python3.10/site-packages/haystack/nodes/prompt/invocation_layer/chatgpt.py\", line 120, in _ensure_token_limit\r\n raise ValueError(\r\nValueError: The prompt or the messages are too long (12807 tokens). 
The length of the prompt or messages and the answer (100 tokens) should be within the max token limit (2049 tokens). Reduce the length of the prompt or messages.\r\n```\r\n\r\n**Expected behavior**\r\n`gpt-4-32k` should support up to 32768 tokens.\r\n\r\n**To Reproduce**\r\nPass more than 2049 tokens to a PromptNode using `gpt-4-32k`\n", "before_files": [{"content": "\"\"\"Utils for using OpenAI API\"\"\"\nimport os\nimport logging\nimport platform\nimport sys\nimport json\nfrom typing import Dict, Union, Tuple, Optional, List\nimport requests\nimport tenacity\nfrom transformers import GPT2TokenizerFast\n\nfrom haystack.errors import OpenAIError, OpenAIRateLimitError, OpenAIUnauthorizedError\nfrom haystack.environment import (\n HAYSTACK_REMOTE_API_BACKOFF_SEC,\n HAYSTACK_REMOTE_API_MAX_RETRIES,\n HAYSTACK_REMOTE_API_TIMEOUT_SEC,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nmachine = platform.machine().lower()\nsystem = platform.system()\n\n\nOPENAI_TIMEOUT = float(os.environ.get(HAYSTACK_REMOTE_API_TIMEOUT_SEC, 30))\nOPENAI_BACKOFF = int(os.environ.get(HAYSTACK_REMOTE_API_BACKOFF_SEC, 10))\nOPENAI_MAX_RETRIES = int(os.environ.get(HAYSTACK_REMOTE_API_MAX_RETRIES, 5))\n\n\nUSE_TIKTOKEN = False\nif sys.version_info >= (3, 8) and (machine in [\"amd64\", \"x86_64\"] or (machine == \"arm64\" and system == \"Darwin\")):\n USE_TIKTOKEN = True\n\nif USE_TIKTOKEN:\n import tiktoken # pylint: disable=import-error\n from tiktoken.model import MODEL_TO_ENCODING\nelse:\n logger.warning(\n \"OpenAI tiktoken module is not available for Python < 3.8,Linux ARM64 and AARCH64. Falling back to GPT2TokenizerFast.\"\n )\n\n\ndef load_openai_tokenizer(tokenizer_name: str):\n \"\"\"Load either the tokenizer from tiktoken (if the library is available) or fallback to the GPT2TokenizerFast\n from the transformers library.\n\n :param tokenizer_name: The name of the tokenizer to load.\n \"\"\"\n if USE_TIKTOKEN:\n logger.debug(\"Using tiktoken %s tokenizer\", tokenizer_name)\n tokenizer = tiktoken.get_encoding(tokenizer_name)\n else:\n logger.debug(\"Using GPT2TokenizerFast tokenizer\")\n tokenizer = GPT2TokenizerFast.from_pretrained(tokenizer_name)\n return tokenizer\n\n\ndef count_openai_tokens(text: str, tokenizer) -> int:\n \"\"\"Count the number of tokens in `text` based on the provided OpenAI `tokenizer`.\n\n :param text: A string to be tokenized.\n :param tokenizer: An OpenAI tokenizer.\n \"\"\"\n if USE_TIKTOKEN:\n return len(tokenizer.encode(text))\n else:\n return len(tokenizer.tokenize(text))\n\n\ndef count_openai_tokens_messages(messages: List[Dict[str, str]], tokenizer) -> int:\n \"\"\"Count the number of tokens in `messages` based on the OpenAI `tokenizer` provided.\n\n :param messages: The messages to be tokenized.\n :param tokenizer: An OpenAI tokenizer.\n \"\"\"\n # adapted from https://platform.openai.com/docs/guides/chat/introduction\n # should be kept up to date\n num_tokens = 0\n for message in messages:\n num_tokens += 4 # every message follows <im_start>{role/name}\\n{content}<im_end>\\n\n for key, value in message.items():\n if USE_TIKTOKEN:\n num_tokens += len(tokenizer.encode(value))\n else:\n num_tokens += len(tokenizer.tokenize(value))\n if key == \"name\": # if there's a name, the role is omitted\n num_tokens += -1 # role is always required and always 1 token\n num_tokens += 2 # every reply is primed with <im_start>assistant\n return num_tokens\n\n\ndef _openai_text_completion_tokenization_details(model_name: str):\n \"\"\"Return the tokenizer name and max tokens limit for a given 
OpenAI `model_name`.\n\n :param model_name: Name of the OpenAI model.\n \"\"\"\n tokenizer_name = \"gpt2\"\n max_tokens_limit = 2049 # Based on this ref: https://platform.openai.com/docs/models/gpt-3\n model_tokenizer = MODEL_TO_ENCODING.get(model_name) if USE_TIKTOKEN else None\n\n # covering the lack of support in Tiktoken. https://github.com/openai/tiktoken/pull/72\n if model_name == \"gpt-35-turbo\" and USE_TIKTOKEN:\n model_tokenizer = \"cl100k_base\"\n\n if model_tokenizer:\n # Based on OpenAI models page, 'davinci' considers have 2049 tokens,\n ## therefore, it is better to add `text-davinci` instead to the condition.\n ## Ref: https://platform.openai.com/docs/models/gpt-3-5\n ## https://platform.openai.com/docs/models/gpt-3\n if \"text-davinci\" in model_name:\n max_tokens_limit = 4097\n tokenizer_name = model_tokenizer\n elif model_name.startswith(\"gpt-3\"):\n max_tokens_limit = 4096\n tokenizer_name = model_tokenizer\n # Ref: https://platform.openai.com/docs/models/gpt-4\n elif model_name.startswith(\"gpt-4-32k\"):\n max_tokens_limit = 32768 # tokens\n tokenizer_name = model_tokenizer\n elif model_name.startswith(\"gpt-4\"):\n max_tokens_limit = 8192 # tokens\n tokenizer_name = model_tokenizer\n else:\n tokenizer_name = model_tokenizer\n\n return tokenizer_name, max_tokens_limit\n\n\[email protected](\n reraise=True,\n retry=tenacity.retry_if_exception_type(OpenAIError)\n and tenacity.retry_if_not_exception_type(OpenAIUnauthorizedError),\n wait=tenacity.wait_exponential(multiplier=OPENAI_BACKOFF),\n stop=tenacity.stop_after_attempt(OPENAI_MAX_RETRIES),\n)\ndef openai_request(\n url: str,\n headers: Dict,\n payload: Dict,\n timeout: Union[float, Tuple[float, float]] = OPENAI_TIMEOUT,\n read_response: Optional[bool] = True,\n **kwargs,\n):\n \"\"\"Make a request to the OpenAI API given a `url`, `headers`, `payload`, and `timeout`.\n\n :param url: The URL of the OpenAI API.\n :param headers: Dictionary of HTTP Headers to send with the :class:`Request`.\n :param payload: The payload to send with the request.\n :param timeout: The timeout length of the request. The default is 30s.\n :param read_response: Whether to read the response as JSON. 
The default is True.\n \"\"\"\n response = requests.request(\"POST\", url, headers=headers, data=json.dumps(payload), timeout=timeout, **kwargs)\n if read_response:\n json_response = json.loads(response.text)\n\n if response.status_code != 200:\n openai_error: OpenAIError\n if response.status_code == 429:\n openai_error = OpenAIRateLimitError(f\"API rate limit exceeded: {response.text}\")\n elif response.status_code == 401:\n openai_error = OpenAIUnauthorizedError(f\"API key is invalid: {response.text}\")\n else:\n openai_error = OpenAIError(\n f\"OpenAI returned an error.\\n\"\n f\"Status code: {response.status_code}\\n\"\n f\"Response body: {response.text}\",\n status_code=response.status_code,\n )\n raise openai_error\n if read_response:\n return json_response\n else:\n return response\n\n\ndef _check_openai_finish_reason(result: Dict, payload: Dict) -> None:\n \"\"\"Check the `finish_reason` the answers returned by OpenAI completions endpoint.\n If the `finish_reason` is `length` or `content_filter`, log a warning to the user.\n\n :param result: The result returned from the OpenAI API.\n :param payload: The payload sent to the OpenAI API.\n \"\"\"\n number_of_truncated_completions = sum(1 for ans in result[\"choices\"] if ans[\"finish_reason\"] == \"length\")\n if number_of_truncated_completions > 0:\n logger.warning(\n \"%s out of the %s completions have been truncated before reaching a natural stopping point. \"\n \"Increase the max_tokens parameter to allow for longer completions.\",\n number_of_truncated_completions,\n payload[\"n\"],\n )\n\n number_of_content_filtered_completions = sum(\n 1 for ans in result[\"choices\"] if ans[\"finish_reason\"] == \"content_filter\"\n )\n if number_of_content_filtered_completions > 0:\n logger.warning(\n \"%s out of the %s completions have omitted content due to a flag from OpenAI content filters.\",\n number_of_truncated_completions,\n payload[\"n\"],\n )\n", "path": "haystack/utils/openai_utils.py"}]} | 3,644 | 440 |
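The golden diff above works by falling back from an exact model-name lookup to a prefix lookup, which is how "gpt-4-32k" ends up with the right tokenizer instead of the gpt2/2049 default. A self-contained sketch of that lookup order follows; the two tables are illustrative stand-ins rather than tiktoken's real data.

```python
# Sketch of the exact-match-then-prefix-match encoding lookup introduced by the
# golden diff above. The table contents are illustrative, not tiktoken's actual
# MODEL_TO_ENCODING / MODEL_PREFIX_TO_ENCODING mappings.
from typing import Optional

MODEL_TO_ENCODING = {"text-davinci-003": "p50k_base", "gpt-3.5-turbo": "cl100k_base"}
MODEL_PREFIX_TO_ENCODING = {"gpt-4-": "cl100k_base", "gpt-3.5-turbo-": "cl100k_base"}

def resolve_encoding(model_name: str) -> Optional[str]:
    if model_name in MODEL_TO_ENCODING:          # exact match first
        return MODEL_TO_ENCODING[model_name]
    for prefix, encoding in MODEL_PREFIX_TO_ENCODING.items():
        if model_name.startswith(prefix):        # then first matching prefix
            return encoding
    return None                                  # caller falls back to gpt2 / 2049 tokens

assert resolve_encoding("gpt-4-32k") == "cl100k_base"
assert resolve_encoding("text-davinci-003") == "p50k_base"
assert resolve_encoding("unknown-model") is None
```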
gh_patches_debug_15914 | rasdani/github-patches | git_diff | keras-team__keras-8240 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
fix device names for multi_gpu_model
This PR will try to fix #8213.
`DeviceSpec.from_string(device).to_string()` is used by tensorflow. ([Ref](https://github.com/tensorflow/tensorflow/blob/40c475b48c091a70ad8061c1508dff6ded2d2af6/tensorflow/python/framework/device.py#L251))
</issue>
<code>
[start of keras/utils/training_utils.py]
1 from ..layers.merge import concatenate
2 from .. import backend as K
3 from ..layers.core import Lambda
4 from ..engine.training import Model
5
6
7 def _get_available_devices():
8 from tensorflow.python.client import device_lib
9 local_device_protos = device_lib.list_local_devices()
10 return [x.name for x in local_device_protos]
11
12
13 def multi_gpu_model(model, gpus):
14 """Replicates a model on different GPUs.
15
16 Specifically, this function implements single-machine
17 multi-GPU data parallelism. It works in the following way:
18
19 - Divide the model's input(s) into multiple sub-batches.
20 - Apply a model copy on each sub-batch. Every model copy
21 is executed on a dedicated GPU.
22 - Concatenate the results (on CPU) into one big batch.
23
24 E.g. if your `batch_size` is 64 and you use `gpus=2`,
25 then we will divide the input into 2 sub-batches of 32 samples,
26 process each sub-batch on one GPU, then return the full
27 batch of 64 processed samples.
28
29 This induces quasi-linear speedup on up to 8 GPUs.
30
31 This function is only available with the TensorFlow backend
32 for the time being.
33
34 # Arguments
35 model: A Keras model instance. To avoid OOM errors,
36 this model could have been built on CPU, for instance
37 (see usage example below).
38 gpus: Integer >= 2, number of on GPUs on which to create
39 model replicas.
40
41 # Returns
42 A Keras `Model` instance which can be used just like the initial
43 `model` argument, but which distributes its workload on multiple GPUs.
44
45 # Example
46
47 ```python
48 import tensorflow as tf
49 from keras.applications import Xception
50 from keras.utils import multi_gpu_model
51 import numpy as np
52
53 num_samples = 1000
54 height = 224
55 width = 224
56 num_classes = 1000
57
58 # Instantiate the base model
59 # (here, we do it on CPU, which is optional).
60 with tf.device('/cpu:0'):
61 model = Xception(weights=None,
62 input_shape=(height, width, 3),
63 classes=num_classes)
64
65 # Replicates the model on 8 GPUs.
66 # This assumes that your machine has 8 available GPUs.
67 parallel_model = multi_gpu_model(model, gpus=8)
68 parallel_model.compile(loss='categorical_crossentropy',
69 optimizer='rmsprop')
70
71 # Generate dummy data.
72 x = np.random.random((num_samples, height, width, 3))
73 y = np.random.random((num_samples, num_classes))
74
75 # This `fit` call will be distributed on 8 GPUs.
76 # Since the batch size is 256, each GPU will process 32 samples.
77 parallel_model.fit(x, y, epochs=20, batch_size=256)
78 ```
79 """
80 if K.backend() != 'tensorflow':
81 raise ValueError('`multi_gpu_model` is only available '
82 'with the TensorFlow backend.')
83 if gpus <= 1:
84 raise ValueError('For multi-gpu usage to be effective, '
85 'call `multi_gpu_model` with `gpus >= 2`. '
86 'Received: `gpus=%d`' % gpus)
87
88 import tensorflow as tf
89
90 target_devices = ['/cpu:0'] + ['/gpu:%d' % i for i in range(gpus)]
91 available_devices = _get_available_devices()
92 for device in target_devices:
93 if device not in available_devices:
94 raise ValueError(
95 'To call `multi_gpu_model` with `gpus=%d`, '
96 'we expect the following devices to be available: %s. '
97 'However this machine only has: %s. '
98 'Try reducing `gpus`.' % (gpus,
99 target_devices,
100 available_devices))
101
102 def get_slice(data, i, parts):
103 shape = tf.shape(data)
104 batch_size = shape[:1]
105 input_shape = shape[1:]
106 step = batch_size // parts
107 if i == gpus - 1:
108 size = batch_size - step * i
109 else:
110 size = step
111 size = tf.concat([size, input_shape], axis=0)
112 stride = tf.concat([step, input_shape * 0], axis=0)
113 start = stride * i
114 return tf.slice(data, start, size)
115
116 all_outputs = []
117 for i in range(len(model.outputs)):
118 all_outputs.append([])
119
120 # Place a copy of the model on each GPU,
121 # each getting a slice of the inputs.
122 for i in range(gpus):
123 with tf.device('/gpu:%d' % i):
124 with tf.name_scope('replica_%d' % i):
125 inputs = []
126 # Retrieve a slice of the input.
127 for x in model.inputs:
128 input_shape = tuple(x.get_shape().as_list())[1:]
129 slice_i = Lambda(get_slice,
130 output_shape=input_shape,
131 arguments={'i': i,
132 'parts': gpus})(x)
133 inputs.append(slice_i)
134
135 # Apply model on slice
136 # (creating a model replica on the target device).
137 outputs = model(inputs)
138 if not isinstance(outputs, list):
139 outputs = [outputs]
140
141 # Save the outputs for merging back together later.
142 for o in range(len(outputs)):
143 all_outputs[o].append(outputs[o])
144
145 # Merge outputs on CPU.
146 with tf.device('/cpu:0'):
147 merged = []
148 for outputs in all_outputs:
149 merged.append(concatenate(outputs,
150 axis=0))
151 return Model(model.inputs, merged)
152
[end of keras/utils/training_utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/keras/utils/training_utils.py b/keras/utils/training_utils.py
--- a/keras/utils/training_utils.py
+++ b/keras/utils/training_utils.py
@@ -10,6 +10,11 @@
return [x.name for x in local_device_protos]
+def _normalize_device_name(name):
+ name = name.lower().replace('device:', '')
+ return name
+
+
def multi_gpu_model(model, gpus):
"""Replicates a model on different GPUs.
@@ -89,6 +94,7 @@
target_devices = ['/cpu:0'] + ['/gpu:%d' % i for i in range(gpus)]
available_devices = _get_available_devices()
+ available_devices = [_normalize_device_name(name) for name in available_devices]
for device in target_devices:
if device not in available_devices:
raise ValueError(
| {"golden_diff": "diff --git a/keras/utils/training_utils.py b/keras/utils/training_utils.py\n--- a/keras/utils/training_utils.py\n+++ b/keras/utils/training_utils.py\n@@ -10,6 +10,11 @@\n return [x.name for x in local_device_protos]\n \n \n+def _normalize_device_name(name):\n+ name = name.lower().replace('device:', '')\n+ return name\n+\n+\n def multi_gpu_model(model, gpus):\n \"\"\"Replicates a model on different GPUs.\n \n@@ -89,6 +94,7 @@\n \n target_devices = ['/cpu:0'] + ['/gpu:%d' % i for i in range(gpus)]\n available_devices = _get_available_devices()\n+ available_devices = [_normalize_device_name(name) for name in available_devices]\n for device in target_devices:\n if device not in available_devices:\n raise ValueError(\n", "issue": "fix device names for multi_gpu_model\nThis PR will try to fix #8213.\r\n\r\n`DeviceSpec.from_string(device).to_string()` is used by tensorflow. ([Ref](https://github.com/tensorflow/tensorflow/blob/40c475b48c091a70ad8061c1508dff6ded2d2af6/tensorflow/python/framework/device.py#L251))\n", "before_files": [{"content": "from ..layers.merge import concatenate\nfrom .. import backend as K\nfrom ..layers.core import Lambda\nfrom ..engine.training import Model\n\n\ndef _get_available_devices():\n from tensorflow.python.client import device_lib\n local_device_protos = device_lib.list_local_devices()\n return [x.name for x in local_device_protos]\n\n\ndef multi_gpu_model(model, gpus):\n \"\"\"Replicates a model on different GPUs.\n\n Specifically, this function implements single-machine\n multi-GPU data parallelism. It works in the following way:\n\n - Divide the model's input(s) into multiple sub-batches.\n - Apply a model copy on each sub-batch. Every model copy\n is executed on a dedicated GPU.\n - Concatenate the results (on CPU) into one big batch.\n\n E.g. if your `batch_size` is 64 and you use `gpus=2`,\n then we will divide the input into 2 sub-batches of 32 samples,\n process each sub-batch on one GPU, then return the full\n batch of 64 processed samples.\n\n This induces quasi-linear speedup on up to 8 GPUs.\n\n This function is only available with the TensorFlow backend\n for the time being.\n\n # Arguments\n model: A Keras model instance. 
To avoid OOM errors,\n this model could have been built on CPU, for instance\n (see usage example below).\n gpus: Integer >= 2, number of on GPUs on which to create\n model replicas.\n\n # Returns\n A Keras `Model` instance which can be used just like the initial\n `model` argument, but which distributes its workload on multiple GPUs.\n\n # Example\n\n ```python\n import tensorflow as tf\n from keras.applications import Xception\n from keras.utils import multi_gpu_model\n import numpy as np\n\n num_samples = 1000\n height = 224\n width = 224\n num_classes = 1000\n\n # Instantiate the base model\n # (here, we do it on CPU, which is optional).\n with tf.device('/cpu:0'):\n model = Xception(weights=None,\n input_shape=(height, width, 3),\n classes=num_classes)\n\n # Replicates the model on 8 GPUs.\n # This assumes that your machine has 8 available GPUs.\n parallel_model = multi_gpu_model(model, gpus=8)\n parallel_model.compile(loss='categorical_crossentropy',\n optimizer='rmsprop')\n\n # Generate dummy data.\n x = np.random.random((num_samples, height, width, 3))\n y = np.random.random((num_samples, num_classes))\n\n # This `fit` call will be distributed on 8 GPUs.\n # Since the batch size is 256, each GPU will process 32 samples.\n parallel_model.fit(x, y, epochs=20, batch_size=256)\n ```\n \"\"\"\n if K.backend() != 'tensorflow':\n raise ValueError('`multi_gpu_model` is only available '\n 'with the TensorFlow backend.')\n if gpus <= 1:\n raise ValueError('For multi-gpu usage to be effective, '\n 'call `multi_gpu_model` with `gpus >= 2`. '\n 'Received: `gpus=%d`' % gpus)\n\n import tensorflow as tf\n\n target_devices = ['/cpu:0'] + ['/gpu:%d' % i for i in range(gpus)]\n available_devices = _get_available_devices()\n for device in target_devices:\n if device not in available_devices:\n raise ValueError(\n 'To call `multi_gpu_model` with `gpus=%d`, '\n 'we expect the following devices to be available: %s. '\n 'However this machine only has: %s. '\n 'Try reducing `gpus`.' % (gpus,\n target_devices,\n available_devices))\n\n def get_slice(data, i, parts):\n shape = tf.shape(data)\n batch_size = shape[:1]\n input_shape = shape[1:]\n step = batch_size // parts\n if i == gpus - 1:\n size = batch_size - step * i\n else:\n size = step\n size = tf.concat([size, input_shape], axis=0)\n stride = tf.concat([step, input_shape * 0], axis=0)\n start = stride * i\n return tf.slice(data, start, size)\n\n all_outputs = []\n for i in range(len(model.outputs)):\n all_outputs.append([])\n\n # Place a copy of the model on each GPU,\n # each getting a slice of the inputs.\n for i in range(gpus):\n with tf.device('/gpu:%d' % i):\n with tf.name_scope('replica_%d' % i):\n inputs = []\n # Retrieve a slice of the input.\n for x in model.inputs:\n input_shape = tuple(x.get_shape().as_list())[1:]\n slice_i = Lambda(get_slice,\n output_shape=input_shape,\n arguments={'i': i,\n 'parts': gpus})(x)\n inputs.append(slice_i)\n\n # Apply model on slice\n # (creating a model replica on the target device).\n outputs = model(inputs)\n if not isinstance(outputs, list):\n outputs = [outputs]\n\n # Save the outputs for merging back together later.\n for o in range(len(outputs)):\n all_outputs[o].append(outputs[o])\n\n # Merge outputs on CPU.\n with tf.device('/cpu:0'):\n merged = []\n for outputs in all_outputs:\n merged.append(concatenate(outputs,\n axis=0))\n return Model(model.inputs, merged)\n", "path": "keras/utils/training_utils.py"}]} | 2,233 | 201 |
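The Keras fix above hinges on the fact that TensorFlow reports devices as "/device:GPU:0" while multi_gpu_model builds "/gpu:0"-style target names; normalizing before comparing resolves the mismatch. A short sketch with a made-up device listing:

```python
# Sketch of the device-name normalization added by the golden diff above. The
# raw_devices list is a made-up stand-in for what device_lib.list_local_devices()
# would report on a 2-GPU machine.
def _normalize_device_name(name: str) -> str:
    return name.lower().replace("device:", "")

raw_devices = ["/device:CPU:0", "/device:XLA_CPU:0", "/device:GPU:0", "/device:GPU:1"]
available = [_normalize_device_name(n) for n in raw_devices]

gpus = 2
target_devices = ["/cpu:0"] + ["/gpu:%d" % i for i in range(gpus)]
missing = [d for d in target_devices if d not in available]

print(available)  # ['/cpu:0', '/xla_cpu:0', '/gpu:0', '/gpu:1']
print(missing)    # [] -> multi_gpu_model(model, gpus=2) no longer raises ValueError
```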
gh_patches_debug_12524 | rasdani/github-patches | git_diff | python-telegram-bot__python-telegram-bot-1734 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Python supported versions need to be updated in docs
Not really a bug, but, for example, the readme and the setup description still say that Python 3.4 is supported.
Copy-pasting from https://pypi.org/project/python-telegram-bot/ :
> This library provides a pure Python interface for the Telegram Bot API. It’s compatible with Python versions 2.7, 3.3+ and PyPy.
</issue>
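For illustration only, a minimal sketch of what the trove-classifier block looks like once the EOL interpreters are dropped (this simply mirrors the accepted diff reproduced further down, it is not additional guidance from the reporter):

```python
# Sketch: setup() classifiers after Python 2, 2.7 and 3.4 are no longer advertised.
classifiers = [
    'Programming Language :: Python',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: 3.6',
    'Programming Language :: Python :: 3.7',
]
```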
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 """The setup and build script for the python-telegram-bot library."""
3
4 import codecs
5 import os
6 from setuptools import setup, find_packages
7
8
9 def requirements():
10 """Build the requirements list for this project"""
11 requirements_list = []
12
13 with open('requirements.txt') as requirements:
14 for install in requirements:
15 requirements_list.append(install.strip())
16
17 return requirements_list
18
19
20 packages = find_packages(exclude=['tests*'])
21
22 with codecs.open('README.rst', 'r', 'utf-8') as fd:
23 fn = os.path.join('telegram', 'version.py')
24 with open(fn) as fh:
25 code = compile(fh.read(), fn, 'exec')
26 exec(code)
27
28 setup(name='python-telegram-bot',
29 version=__version__,
30 author='Leandro Toledo',
31 author_email='[email protected]',
32 license='LGPLv3',
33 url='https://python-telegram-bot.org/',
34 keywords='python telegram bot api wrapper',
35 description="We have made you a wrapper you can't refuse",
36 long_description=fd.read(),
37 packages=packages,
38 install_requires=requirements(),
39 extras_require={
40 'json': 'ujson',
41 'socks': 'PySocks'
42 },
43 include_package_data=True,
44 classifiers=[
45 'Development Status :: 5 - Production/Stable',
46 'Intended Audience :: Developers',
47 'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',
48 'Operating System :: OS Independent',
49 'Topic :: Software Development :: Libraries :: Python Modules',
50 'Topic :: Communications :: Chat',
51 'Topic :: Internet',
52 'Programming Language :: Python',
53 'Programming Language :: Python :: 2',
54 'Programming Language :: Python :: 2.7',
55 'Programming Language :: Python :: 3',
56 'Programming Language :: Python :: 3.4',
57 'Programming Language :: Python :: 3.5',
58 'Programming Language :: Python :: 3.6',
59 'Programming Language :: Python :: 3.7'
60 ],)
61
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -50,10 +50,7 @@
'Topic :: Communications :: Chat',
'Topic :: Internet',
'Programming Language :: Python',
- 'Programming Language :: Python :: 2',
- 'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
- 'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -50,10 +50,7 @@\n 'Topic :: Communications :: Chat',\n 'Topic :: Internet',\n 'Programming Language :: Python',\n- 'Programming Language :: Python :: 2',\n- 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n- 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7'\n", "issue": "Python supported versions need to be updated in docs\nNot really a bug, but for example it still says Python 3.4 is supported in readme and setup explanations.\r\n\r\nCopy-pasting from https://pypi.org/project/python-telegram-bot/ :\r\n\r\n> This library provides a pure Python interface for the Telegram Bot API. It\u2019s compatible with Python versions 2.7, 3.3+ and PyPy.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"The setup and build script for the python-telegram-bot library.\"\"\"\n\nimport codecs\nimport os\nfrom setuptools import setup, find_packages\n\n\ndef requirements():\n \"\"\"Build the requirements list for this project\"\"\"\n requirements_list = []\n\n with open('requirements.txt') as requirements:\n for install in requirements:\n requirements_list.append(install.strip())\n\n return requirements_list\n\n\npackages = find_packages(exclude=['tests*'])\n\nwith codecs.open('README.rst', 'r', 'utf-8') as fd:\n fn = os.path.join('telegram', 'version.py')\n with open(fn) as fh:\n code = compile(fh.read(), fn, 'exec')\n exec(code)\n\n setup(name='python-telegram-bot',\n version=__version__,\n author='Leandro Toledo',\n author_email='[email protected]',\n license='LGPLv3',\n url='https://python-telegram-bot.org/',\n keywords='python telegram bot api wrapper',\n description=\"We have made you a wrapper you can't refuse\",\n long_description=fd.read(),\n packages=packages,\n install_requires=requirements(),\n extras_require={\n 'json': 'ujson',\n 'socks': 'PySocks'\n },\n include_package_data=True,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',\n 'Operating System :: OS Independent',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Communications :: Chat',\n 'Topic :: Internet',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7'\n ],)\n", "path": "setup.py"}]} | 1,177 | 137 |
gh_patches_debug_37494 | rasdani/github-patches | git_diff | ansible-collections__community.vmware-1437 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
vmware_content_library_info: Only lists Content Libraries with the type of "Local", does not include "Subscribed" type
##### SUMMARY
When using the vmware_content_library_info task type to query our Content Libraries, only the Libraries with the type of "Local" are reported back to the ansible task. We use shared or "Subscribed" Content Libraries in our environment to share a consistent Library of VM Templates between all of our vCenters.
How can we get this functionality added?
##### ISSUE TYPE
- Bug Report
##### COMPONENT NAME
vmware_content_library_info
##### ANSIBLE VERSION
<!--- Paste verbatim output from "ansible --version" between quotes -->
```paste below
ansible 2.10.9
config file = /home/<redacted>/.ansible.cfg
configured module search path = ['/home/<redacted>/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /home/<redacted>/.local/lib/python3.8/site-packages/ansible
executable location = /home/<redacted>/.local/bin/ansible
python version = 3.8.10 (default, Jun 22 2022, 20:18:18) [GCC 9.4.0]
```
##### COLLECTION VERSION
<!--- Paste verbatim output from "ansible-galaxy collection list <namespace>.<collection>" between the quotes
```paste below
# /usr/local/lib/python3.8/dist-packages/ansible_collections
Collection Version
---------------- -------
community.vmware 1.10.0
# /home/<redacted>/.local/lib/python3.8/site-packages/ansible_collections
Collection Version
---------------- -------
community.vmware 1.10.0
```
##### CONFIGURATION
<!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes -->
```paste below
DEFAULT_HOST_LIST(/home/<redacted>/.ansible.cfg) = ['/home/<redacted>/inventory']
DEFAULT_LOG_PATH(/home/<redacted>/.ansible.cfg) = /home/<redacted>/.ansible/logs/log.txt
DEFAULT_TIMEOUT(/home/<redacted>/.ansible.cfg) = 120
DEFAULT_VAULT_PASSWORD_FILE(/home/<redacted>/.ansible.cfg) = /home/<redacted>/playbooks/secret.yaml
```
##### OS / ENVIRONMENT
vCenter Version - 7.0.3 build 20150588
Client - vSphere Client version 7.0.3.00700
Hosts - VMware ESXi, 7.0.3, 20036589
##### STEPS TO REPRODUCE
```yaml
- name: "Collect list of Content Libraries from specified vCenter"
community.vmware.vmware_content_library_info:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
validate_certs: no
register: libraries
- name: "Display list of found Content Libraries"
debug:
var: libraries
```
##### EXPECTED RESULTS
```yaml
TASK [Display list of found Content Libraries] ****************************************************************************************************************************
ok: [localhost] => {
"libraries": {
"changed": false,
"content_libs": [
"6b5e0c60-3173-4a75-8101-33335f3bb7dd",
"7bd40369-84d6-4fd5-9cf9-7c33377f3931"
],
"failed": false
}
}
```
##### ACTUAL RESULTS
```yaml
TASK [Display list of found Content Libraries] ****************************************************************************************************************************
ok: [localhost] => {
"libraries": {
"changed": false,
"content_libs": [
"6b5e0c60-3173-4a75-8101-33335f3bb7dd"
],
"failed": false
}
}
```
</issue>
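For context, the module quoted below only ever asks the SDK for local libraries. A minimal sketch, assuming the `SubscribedLibrary` service on the same authenticated client mirrors `LocalLibrary` (the accepted diff further down uses exactly these calls), of listing both types:

```python
def list_all_content_libraries(api_client):
    """Sketch: combine local and subscribed Content Library IDs.

    api_client is assumed to be the authenticated vSphere Automation SDK
    client the module already creates; either list() call may return None.
    """
    local_ids = api_client.content.LocalLibrary.list() or []
    subscribed_ids = api_client.content.SubscribedLibrary.list() or []
    return local_ids + subscribed_ids
```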
<code>
[start of plugins/modules/vmware_content_library_info.py]
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3
4 # Copyright: (c) 2019, Ansible Project
5 # Copyright: (c) 2019, Pavan Bidkar <[email protected]>
6 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
7 # SPDX-License-Identifier: GPL-3.0-or-later
8
9 from __future__ import absolute_import, division, print_function
10 __metaclass__ = type
11
12
13 DOCUMENTATION = r'''
14 ---
15 module: vmware_content_library_info
16 short_description: Gather information about VMWare Content Library
17 description:
18 - Module to list the content libraries.
19 - Module to get information about specific content library.
20 - Content Library feature is introduced in vSphere 6.0 version, so this module is not supported in the earlier versions of vSphere.
21 - All variables and VMware object names are case sensitive.
22 author:
23 - Pavan Bidkar (@pgbidkar)
24 requirements:
25 - vSphere Automation SDK
26 options:
27 library_id:
28 description:
29 - content library id for which details needs to be fetched.
30 type: str
31 required: False
32 extends_documentation_fragment:
33 - community.vmware.vmware_rest_client.documentation
34
35 '''
36
37 EXAMPLES = r'''
38 - name: Get List of Content Libraries
39 community.vmware.vmware_content_library_info:
40 hostname: '{{ vcenter_hostname }}'
41 username: '{{ vcenter_username }}'
42 password: '{{ vcenter_password }}'
43 delegate_to: localhost
44
45 - name: Get information about content library
46 community.vmware.vmware_content_library_info:
47 hostname: '{{ vcenter_hostname }}'
48 username: '{{ vcenter_username }}'
49 password: '{{ vcenter_password }}'
50 library_id: '13b0f060-f4d3-4f84-b61f-0fe1b0c0a5a8'
51 delegate_to: localhost
52 '''
53
54 RETURN = r'''
55 content_lib_details:
56 description: list of content library metadata
57 returned: on success
58 type: list
59 sample: [
60 {
61 "library_creation_time": "2019-07-02T11:50:52.242000",
62 "library_description": "new description",
63 "library_id": "13b0f060-f4d3-4f84-b61f-0fe1b0c0a5a8",
64 "library_name": "demo-local-lib",
65 "library_publish_info": {
66 "authentication_method": "NONE",
67 "persist_json_enabled": false,
68 "publish_url": null,
69 "published": false,
70 "user_name": null
71 },
72 "library_server_guid": "0fd5813b-aac7-4b92-9fb7-f18f16565613",
73 "library_type": "LOCAL",
74 "library_version": "3"
75 }
76 ]
77 content_libs:
78 description: list of content libraries
79 returned: on success
80 type: list
81 sample: [
82 "ded9c4d5-0dcd-4837-b1d8-af7398511e33",
83 "36b72549-14ed-4b5f-94cb-6213fecacc02"
84 ]
85 '''
86
87 from ansible.module_utils.basic import AnsibleModule
88 from ansible_collections.community.vmware.plugins.module_utils.vmware_rest_client import VmwareRestClient
89
90
91 class VmwareContentLibInfo(VmwareRestClient):
92 def __init__(self, module):
93 """Constructor."""
94 super(VmwareContentLibInfo, self).__init__(module)
95 self.content_service = self.api_client
96 self.library_info = []
97
98 def get_all_content_libs(self):
99 """Method to retrieve List of content libraries."""
100 self.module.exit_json(changed=False, content_libs=self.content_service.content.LocalLibrary.list())
101
102 def get_content_lib_details(self, library_id):
103 """Method to retrieve Details of contentlib with library_id"""
104 try:
105 lib_details = self.content_service.content.LocalLibrary.get(library_id)
106 except Exception as e:
107 self.module.fail_json(exists=False, msg="%s" % self.get_error_message(e))
108 lib_publish_info = dict(
109 persist_json_enabled=lib_details.publish_info.persist_json_enabled,
110 authentication_method=lib_details.publish_info.authentication_method,
111 publish_url=lib_details.publish_info.publish_url,
112 published=lib_details.publish_info.published,
113 user_name=lib_details.publish_info.user_name
114 )
115 self.library_info.append(
116 dict(
117 library_name=lib_details.name,
118 library_description=lib_details.description,
119 library_id=lib_details.id,
120 library_type=lib_details.type,
121 library_creation_time=lib_details.creation_time,
122 library_server_guid=lib_details.server_guid,
123 library_version=lib_details.version,
124 library_publish_info=lib_publish_info
125 )
126 )
127
128 self.module.exit_json(exists=False, changed=False, content_lib_details=self.library_info)
129
130
131 def main():
132 argument_spec = VmwareRestClient.vmware_client_argument_spec()
133 argument_spec.update(
134 library_id=dict(type='str', required=False),
135 )
136 module = AnsibleModule(argument_spec=argument_spec,
137 supports_check_mode=True)
138
139 vmware_contentlib_info = VmwareContentLibInfo(module)
140 if module.params.get('library_id'):
141 vmware_contentlib_info.get_content_lib_details(module.params['library_id'])
142 else:
143 vmware_contentlib_info.get_all_content_libs()
144
145
146 if __name__ == '__main__':
147 main()
148
[end of plugins/modules/vmware_content_library_info.py]
</code>
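One wrinkle in the listing above: `publish_info` is only populated for local libraries, so a detail lookup that also covers subscribed libraries has to branch on which service owns the ID rather than always calling `LocalLibrary.get()`. A rough sketch, with the service names taken from the same SDK client and the helper name purely illustrative:

```python
def get_library_details(api_client, library_id, local_ids, subscribed_ids):
    """Sketch: fetch details from whichever service owns the library."""
    if library_id in local_ids:
        # Local libraries carry publish_info (publish_url, published, ...).
        return api_client.content.LocalLibrary.get(library_id)
    if library_id in subscribed_ids:
        # Subscribed libraries have no publish_info to report.
        return api_client.content.SubscribedLibrary.get(library_id)
    raise KeyError("Library %s not found" % library_id)
```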
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/plugins/modules/vmware_content_library_info.py b/plugins/modules/vmware_content_library_info.py
--- a/plugins/modules/vmware_content_library_info.py
+++ b/plugins/modules/vmware_content_library_info.py
@@ -93,25 +93,46 @@
"""Constructor."""
super(VmwareContentLibInfo, self).__init__(module)
self.content_service = self.api_client
+ self.local_content_libraries = self.content_service.content.LocalLibrary.list()
+ if self.local_content_libraries is None:
+ self.local_content_libraries = []
+
+ self.subscribed_content_libraries = self.content_service.content.SubscribedLibrary.list()
+ if self.subscribed_content_libraries is None:
+ self.subscribed_content_libraries = []
+
self.library_info = []
def get_all_content_libs(self):
"""Method to retrieve List of content libraries."""
- self.module.exit_json(changed=False, content_libs=self.content_service.content.LocalLibrary.list())
+ content_libraries = self.local_content_libraries + self.subscribed_content_libraries
+
+ self.module.exit_json(changed=False, content_libs=content_libraries)
def get_content_lib_details(self, library_id):
"""Method to retrieve Details of contentlib with library_id"""
- try:
- lib_details = self.content_service.content.LocalLibrary.get(library_id)
- except Exception as e:
- self.module.fail_json(exists=False, msg="%s" % self.get_error_message(e))
- lib_publish_info = dict(
- persist_json_enabled=lib_details.publish_info.persist_json_enabled,
- authentication_method=lib_details.publish_info.authentication_method,
- publish_url=lib_details.publish_info.publish_url,
- published=lib_details.publish_info.published,
- user_name=lib_details.publish_info.user_name
- )
+ lib_publish_info = None
+
+ if library_id in self.local_content_libraries:
+ try:
+ lib_details = self.content_service.content.LocalLibrary.get(library_id)
+ lib_publish_info = dict(
+ persist_json_enabled=lib_details.publish_info.persist_json_enabled,
+ authentication_method=lib_details.publish_info.authentication_method,
+ publish_url=lib_details.publish_info.publish_url,
+ published=lib_details.publish_info.published,
+ user_name=lib_details.publish_info.user_name
+ )
+ except Exception as e:
+ self.module.fail_json(exists=False, msg="%s" % self.get_error_message(e))
+ elif library_id in self.subscribed_content_libraries:
+ try:
+ lib_details = self.content_service.content.SubscribedLibrary.get(library_id)
+ except Exception as e:
+ self.module.fail_json(exists=False, msg="%s" % self.get_error_message(e))
+ else:
+ self.module.fail_json(exists=False, msg="Library %s not found." % library_id)
+
self.library_info.append(
dict(
library_name=lib_details.name,
| {"golden_diff": "diff --git a/plugins/modules/vmware_content_library_info.py b/plugins/modules/vmware_content_library_info.py\n--- a/plugins/modules/vmware_content_library_info.py\n+++ b/plugins/modules/vmware_content_library_info.py\n@@ -93,25 +93,46 @@\n \"\"\"Constructor.\"\"\"\n super(VmwareContentLibInfo, self).__init__(module)\n self.content_service = self.api_client\n+ self.local_content_libraries = self.content_service.content.LocalLibrary.list()\n+ if self.local_content_libraries is None:\n+ self.local_content_libraries = []\n+\n+ self.subscribed_content_libraries = self.content_service.content.SubscribedLibrary.list()\n+ if self.subscribed_content_libraries is None:\n+ self.subscribed_content_libraries = []\n+\n self.library_info = []\n \n def get_all_content_libs(self):\n \"\"\"Method to retrieve List of content libraries.\"\"\"\n- self.module.exit_json(changed=False, content_libs=self.content_service.content.LocalLibrary.list())\n+ content_libraries = self.local_content_libraries + self.subscribed_content_libraries\n+\n+ self.module.exit_json(changed=False, content_libs=content_libraries)\n \n def get_content_lib_details(self, library_id):\n \"\"\"Method to retrieve Details of contentlib with library_id\"\"\"\n- try:\n- lib_details = self.content_service.content.LocalLibrary.get(library_id)\n- except Exception as e:\n- self.module.fail_json(exists=False, msg=\"%s\" % self.get_error_message(e))\n- lib_publish_info = dict(\n- persist_json_enabled=lib_details.publish_info.persist_json_enabled,\n- authentication_method=lib_details.publish_info.authentication_method,\n- publish_url=lib_details.publish_info.publish_url,\n- published=lib_details.publish_info.published,\n- user_name=lib_details.publish_info.user_name\n- )\n+ lib_publish_info = None\n+\n+ if library_id in self.local_content_libraries:\n+ try:\n+ lib_details = self.content_service.content.LocalLibrary.get(library_id)\n+ lib_publish_info = dict(\n+ persist_json_enabled=lib_details.publish_info.persist_json_enabled,\n+ authentication_method=lib_details.publish_info.authentication_method,\n+ publish_url=lib_details.publish_info.publish_url,\n+ published=lib_details.publish_info.published,\n+ user_name=lib_details.publish_info.user_name\n+ )\n+ except Exception as e:\n+ self.module.fail_json(exists=False, msg=\"%s\" % self.get_error_message(e))\n+ elif library_id in self.subscribed_content_libraries:\n+ try:\n+ lib_details = self.content_service.content.SubscribedLibrary.get(library_id)\n+ except Exception as e:\n+ self.module.fail_json(exists=False, msg=\"%s\" % self.get_error_message(e))\n+ else:\n+ self.module.fail_json(exists=False, msg=\"Library %s not found.\" % library_id)\n+\n self.library_info.append(\n dict(\n library_name=lib_details.name,\n", "issue": "vmware_content_library_info: Only lists Content Libraries with the type of \"Local\", does not include \"Subscribed\" type\n<!--- Verify first that your issue is not already reported on GitHub -->\r\n<!--- Also test if the latest release and devel branch are affected too -->\r\n<!--- Complete *all* sections as described, this form is processed automatically -->\r\n\r\n##### SUMMARY\r\nWhen using the vmware_content_library_info task type to query our Content Libraries, only the Libraries with the type of \"Local\" are reported back to the ansible task. 
We used shared or \"Subscribed\" Content Libraries in our environment, to share a consistent Library of VM Templates between all of our vCenters.\r\n\r\nHow can we get this functionality added?\r\n\r\n##### ISSUE TYPE\r\n- Bug Report\r\n\r\n##### COMPONENT NAME\r\nvmware_content_library_info\r\n\r\n##### ANSIBLE VERSION\r\n<!--- Paste verbatim output from \"ansible --version\" between quotes -->\r\n```paste below\r\nansible 2.10.9\r\n config file = /home/<redacted>/.ansible.cfg\r\n configured module search path = ['/home/<redacted>/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']\r\n ansible python module location = /home/<redacted>/.local/lib/python3.8/site-packages/ansible\r\n executable location = /home/<redacted>/.local/bin/ansible\r\n python version = 3.8.10 (default, Jun 22 2022, 20:18:18) [GCC 9.4.0]\r\n```\r\n\r\n##### COLLECTION VERSION\r\n<!--- Paste verbatim output from \"ansible-galaxy collection list <namespace>.<collection>\" between the quotes\r\nfor example: ansible-galaxy collection list community.general\r\n-->\r\n```paste below\r\n\r\n# /usr/local/lib/python3.8/dist-packages/ansible_collections\r\nCollection Version\r\n---------------- -------\r\ncommunity.vmware 1.10.0\r\n\r\n# /home/<redacted>/.local/lib/python3.8/site-packages/ansible_collections\r\nCollection Version\r\n---------------- -------\r\ncommunity.vmware 1.10.0\r\n```\r\n\r\n##### CONFIGURATION\r\n<!--- Paste verbatim output from \"ansible-config dump --only-changed\" between quotes -->\r\n```paste below\r\nDEFAULT_HOST_LIST(/home/<redacted>/.ansible.cfg) = ['/home/<redacted>/inventory']\r\nDEFAULT_LOG_PATH(/home/<redacted>/.ansible.cfg) = /home/<redacted>/.ansible/logs/log.txt\r\nDEFAULT_TIMEOUT(/home/<redacted>/.ansible.cfg) = 120\r\nDEFAULT_VAULT_PASSWORD_FILE(/home/<redacted>/.ansible.cfg) = /home/<redacted>/playbooks/secret.yaml\r\n```\r\n\r\n##### OS / ENVIRONMENT\r\n<!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. -->\r\nvCenter Version - 7.0.3 build 20150588\r\nClient - vSphere Client version 7.0.3.00700\r\nHosts - VMware ESXi, 7.0.3, 20036589\r\n\r\n##### STEPS TO REPRODUCE\r\n<!--- Describe exactly how to reproduce the problem, using a minimal test-case -->\r\n\r\n<!--- Paste example playbooks or commands between quotes below -->\r\n```yaml\r\n - name: \"Collect list of Content Libraries from specified vCenter\"\r\n community.vmware.vmware_content_library_info:\r\n hostname: \"{{ hostname }}\"\r\n username: \"{{ username }}\"\r\n password: \"{{ password }}\"\r\n validate_certs: no\r\n register: libraries\r\n\r\n - name: \"Display list of found Content Libraries\"\r\n debug:\r\n var: libraries\r\n```\r\n\r\n<!--- HINT: You can paste gist.github.com links for larger files -->\r\n\r\n##### EXPECTED RESULTS\r\n<!--- Describe what you expected to happen when running the steps above -->\r\n```yaml\r\nTASK [Display list of found Content Libraries] ****************************************************************************************************************************\r\nok: [localhost] => {\r\n \"libraries\": {\r\n \"changed\": false,\r\n \"content_libs\": [\r\n \"6b5e0c60-3173-4a75-8101-33335f3bb7dd\",\r\n \"7bd40369-84d6-4fd5-9cf9-7c33377f3931\"\r\n ],\r\n \"failed\": false\r\n }\r\n}\r\n```\r\n\r\n##### ACTUAL RESULTS\r\n<!--- Describe what actually happened. 
If possible run with extra verbosity (-vvvv) -->\r\n\r\n<!--- Paste verbatim command output between quotes -->\r\n```yaml\r\nTASK [Display list of found Content Libraries] ****************************************************************************************************************************\r\nok: [localhost] => {\r\n \"libraries\": {\r\n \"changed\": false,\r\n \"content_libs\": [\r\n \"6b5e0c60-3173-4a75-8101-33335f3bb7dd\"\r\n ],\r\n \"failed\": false\r\n }\r\n}\r\n```\r\n\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2019, Ansible Project\n# Copyright: (c) 2019, Pavan Bidkar <[email protected]>\n# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = r'''\n---\nmodule: vmware_content_library_info\nshort_description: Gather information about VMWare Content Library\ndescription:\n- Module to list the content libraries.\n- Module to get information about specific content library.\n- Content Library feature is introduced in vSphere 6.0 version, so this module is not supported in the earlier versions of vSphere.\n- All variables and VMware object names are case sensitive.\nauthor:\n- Pavan Bidkar (@pgbidkar)\nrequirements:\n- vSphere Automation SDK\noptions:\n library_id:\n description:\n - content library id for which details needs to be fetched.\n type: str\n required: False\nextends_documentation_fragment:\n- community.vmware.vmware_rest_client.documentation\n\n'''\n\nEXAMPLES = r'''\n- name: Get List of Content Libraries\n community.vmware.vmware_content_library_info:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n delegate_to: localhost\n\n- name: Get information about content library\n community.vmware.vmware_content_library_info:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n library_id: '13b0f060-f4d3-4f84-b61f-0fe1b0c0a5a8'\n delegate_to: localhost\n'''\n\nRETURN = r'''\ncontent_lib_details:\n description: list of content library metadata\n returned: on success\n type: list\n sample: [\n {\n \"library_creation_time\": \"2019-07-02T11:50:52.242000\",\n \"library_description\": \"new description\",\n \"library_id\": \"13b0f060-f4d3-4f84-b61f-0fe1b0c0a5a8\",\n \"library_name\": \"demo-local-lib\",\n \"library_publish_info\": {\n \"authentication_method\": \"NONE\",\n \"persist_json_enabled\": false,\n \"publish_url\": null,\n \"published\": false,\n \"user_name\": null\n },\n \"library_server_guid\": \"0fd5813b-aac7-4b92-9fb7-f18f16565613\",\n \"library_type\": \"LOCAL\",\n \"library_version\": \"3\"\n }\n ]\ncontent_libs:\n description: list of content libraries\n returned: on success\n type: list\n sample: [\n \"ded9c4d5-0dcd-4837-b1d8-af7398511e33\",\n \"36b72549-14ed-4b5f-94cb-6213fecacc02\"\n ]\n'''\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible_collections.community.vmware.plugins.module_utils.vmware_rest_client import VmwareRestClient\n\n\nclass VmwareContentLibInfo(VmwareRestClient):\n def __init__(self, module):\n \"\"\"Constructor.\"\"\"\n super(VmwareContentLibInfo, self).__init__(module)\n self.content_service = self.api_client\n self.library_info = []\n\n def get_all_content_libs(self):\n \"\"\"Method to retrieve List of content libraries.\"\"\"\n 
self.module.exit_json(changed=False, content_libs=self.content_service.content.LocalLibrary.list())\n\n def get_content_lib_details(self, library_id):\n \"\"\"Method to retrieve Details of contentlib with library_id\"\"\"\n try:\n lib_details = self.content_service.content.LocalLibrary.get(library_id)\n except Exception as e:\n self.module.fail_json(exists=False, msg=\"%s\" % self.get_error_message(e))\n lib_publish_info = dict(\n persist_json_enabled=lib_details.publish_info.persist_json_enabled,\n authentication_method=lib_details.publish_info.authentication_method,\n publish_url=lib_details.publish_info.publish_url,\n published=lib_details.publish_info.published,\n user_name=lib_details.publish_info.user_name\n )\n self.library_info.append(\n dict(\n library_name=lib_details.name,\n library_description=lib_details.description,\n library_id=lib_details.id,\n library_type=lib_details.type,\n library_creation_time=lib_details.creation_time,\n library_server_guid=lib_details.server_guid,\n library_version=lib_details.version,\n library_publish_info=lib_publish_info\n )\n )\n\n self.module.exit_json(exists=False, changed=False, content_lib_details=self.library_info)\n\n\ndef main():\n argument_spec = VmwareRestClient.vmware_client_argument_spec()\n argument_spec.update(\n library_id=dict(type='str', required=False),\n )\n module = AnsibleModule(argument_spec=argument_spec,\n supports_check_mode=True)\n\n vmware_contentlib_info = VmwareContentLibInfo(module)\n if module.params.get('library_id'):\n vmware_contentlib_info.get_content_lib_details(module.params['library_id'])\n else:\n vmware_contentlib_info.get_all_content_libs()\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/vmware_content_library_info.py"}]} | 3,196 | 640 |
gh_patches_debug_17744 | rasdani/github-patches | git_diff | elastic__apm-agent-python-1190 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ElasticAPM crash because of a missing context since version 6.3.0
**Describe the bug**:
Since ElasticAPM release 6.3.0 our application has started to see a ton of crashes that we believe are caused by a recent change in ElasticAPM. The elasticsearch instrumentation package tries to assign the DB type "elasticsearch" into a None context, which causes it to crash.
Here's a snippet of a stack trace from our app:
```
...
File "/var/lib/assemblyline/.local/lib/python3.7/site-packages/elasticsearch/client/utils.py", line 168, in _wrapped
return func(*args, params=params, headers=headers, **kwargs)
File "/var/lib/assemblyline/.local/lib/python3.7/site-packages/elasticsearch/client/__init__.py", line 1026, in get
"GET", _make_path(index, doc_type, id), params=params, headers=headers
File "/var/lib/assemblyline/.local/lib/python3.7/site-packages/elasticapm/instrumentation/packages/base.py", line 210, in call_if_sampling
return self.call(module, method, wrapped, instance, args, kwargs)
File "/var/lib/assemblyline/.local/lib/python3.7/site-packages/elasticapm/instrumentation/packages/elasticsearch.py", line 113, in call
result_data = wrapped(*args, **kwargs)
File "/var/lib/assemblyline/.local/lib/python3.7/site-packages/elasticsearch/transport.py", line 388, in perform_request
timeout=timeout,
File "/var/lib/assemblyline/.local/lib/python3.7/site-packages/elasticapm/instrumentation/packages/base.py", line 210, in call_if_sampling
return self.call(module, method, wrapped, instance, args, kwargs)
File "/var/lib/assemblyline/.local/lib/python3.7/site-packages/elasticapm/instrumentation/packages/elasticsearch.py", line 56, in call
self._update_context_by_request_data(span.context, instance, args, kwargs)
File "/var/lib/assemblyline/.local/lib/python3.7/site-packages/elasticapm/instrumentation/packages/elasticsearch.py", line 72, in _update_context_by_request_data
context["db"] = {"type": "elasticsearch"}
TypeError: 'NoneType' object does not support item assignment
```
I believe this crash is related to a recent change in this commit: https://github.com/elastic/apm-agent-python/commit/ee75cb8f238303b79d0f697a7f2eca547a1dfe8c#diff-c8fb731f92134757656c157f5c3175bcb62e131c1fed1aec5041367603c204d0L62
You can see here that the context was previously assigned its DB type in a way that still worked even if the context was None, but the new code assumes the context is a dictionary. I'm not creating a PR to fix this because I'm not 100% sure whether the old way was changed for a reason.
**Possible fix**
I have a very limited understanding of what that context should be before it reaches this function, but possible fixes include:
1. Revert to the old way of assigning the DB type.
2. Test for a None context before assigning a type (see also the sketch after this list):
```
if context is None:
context = {}
```
3. or make sure span.context default value is an empty dict instead of None
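A rough sketch of option 2 above, assuming the guard sits wherever the instrumentation first touches the span (the helper name is illustrative only, not existing ElasticAPM API):

```python
def annotate_span_context(span, annotate):
    """Apply `annotate` to span.context only when there is a context to annotate.

    A span without a context (or no current span at all, e.g. an unsampled or
    dropped span) is skipped instead of raising TypeError.
    """
    context = getattr(span, "context", None)
    if context is None:
        return False
    annotate(context)
    return True


# e.g. annotate_span_context(execution_context.get_span(),
#                            lambda ctx: ctx.setdefault("db", {"type": "elasticsearch"}))
```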
**To Reproduce**
I have no easy way to reproduce this crash because it does not happen all the time.
**Environment (please complete the following information)**
- OS: Linux
- Python version: 3.7.11
- Framework and version [e.g. Django 2.1]: Flask 2.0.1
- APM Server version: 7.12
- Agent version: 6.3.0+
**Additional Information**
Our app is launched in Gunicorn using gevent workers.
</issue>
<code>
[start of elasticapm/instrumentation/packages/elasticsearch.py]
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2019, Elasticsearch BV
4 # All rights reserved.
5 #
6 # Redistribution and use in source and binary forms, with or without
7 # modification, are permitted provided that the following conditions are met:
8 #
9 # * Redistributions of source code must retain the above copyright notice, this
10 # list of conditions and the following disclaimer.
11 #
12 # * Redistributions in binary form must reproduce the above copyright notice,
13 # this list of conditions and the following disclaimer in the documentation
14 # and/or other materials provided with the distribution.
15 #
16 # * Neither the name of the copyright holder nor the names of its
17 # contributors may be used to endorse or promote products derived from
18 # this software without specific prior written permission.
19 #
20 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31 from __future__ import absolute_import
32
33 import re
34
35 import elasticapm
36 from elasticapm.instrumentation.packages.base import AbstractInstrumentedModule
37 from elasticapm.traces import execution_context
38 from elasticapm.utils.logging import get_logger
39
40 logger = get_logger("elasticapm.instrument")
41
42 should_capture_body_re = re.compile("/(_search|_msearch|_count|_async_search|_sql|_eql)(/|$)")
43
44
45 class ElasticsearchConnectionInstrumentation(AbstractInstrumentedModule):
46 name = "elasticsearch_connection"
47
48 instrument_list = [
49 ("elasticsearch.connection.http_urllib3", "Urllib3HttpConnection.perform_request"),
50 ("elasticsearch.connection.http_requests", "RequestsHttpConnection.perform_request"),
51 ]
52
53 def call(self, module, method, wrapped, instance, args, kwargs):
54 span = execution_context.get_span()
55
56 self._update_context_by_request_data(span.context, instance, args, kwargs)
57
58 status_code, headers, raw_data = wrapped(*args, **kwargs)
59
60 span.context["http"] = {"status_code": status_code}
61
62 return status_code, headers, raw_data
63
64 def _update_context_by_request_data(self, context, instance, args, kwargs):
65 args_len = len(args)
66 url = args[1] if args_len > 1 else kwargs.get("url")
67 params = args[2] if args_len > 2 else kwargs.get("params")
68 body_serialized = args[3] if args_len > 3 else kwargs.get("body")
69
70 should_capture_body = bool(should_capture_body_re.search(url))
71
72 context["db"] = {"type": "elasticsearch"}
73 if should_capture_body:
74 query = []
75 # using both q AND body is allowed in some API endpoints / ES versions,
76 # but not in others. We simply capture both if they are there so the
77 # user can see it.
78 if params and "q" in params:
79 # 'q' is already encoded to a byte string at this point
80 # we assume utf8, which is the default
81 query.append("q=" + params["q"].decode("utf-8", errors="replace"))
82 if body_serialized:
83 if isinstance(body_serialized, bytes):
84 query.append(body_serialized.decode("utf-8", errors="replace"))
85 else:
86 query.append(body_serialized)
87 if query:
88 context["db"]["statement"] = "\n\n".join(query)
89
90 context["destination"] = {
91 "address": instance.host,
92 "service": {"name": "elasticsearch", "resource": "elasticsearch", "type": "db"},
93 }
94
95
96 class ElasticsearchTransportInstrumentation(AbstractInstrumentedModule):
97 name = "elasticsearch_connection"
98
99 instrument_list = [
100 ("elasticsearch.transport", "Transport.perform_request"),
101 ]
102
103 def call(self, module, method, wrapped, instance, args, kwargs):
104 with elasticapm.capture_span(
105 self._get_signature(args, kwargs),
106 span_type="db",
107 span_subtype="elasticsearch",
108 span_action="query",
109 extra={},
110 skip_frames=2,
111 leaf=True,
112 ) as span:
113 result_data = wrapped(*args, **kwargs)
114
115 try:
116 span.context["db"]["rows_affected"] = result_data["hits"]["total"]["value"]
117 except (KeyError, TypeError):
118 pass
119
120 return result_data
121
122 def _get_signature(self, args, kwargs):
123 args_len = len(args)
124 http_method = args[0] if args_len else kwargs.get("method")
125 http_path = args[1] if args_len > 1 else kwargs.get("url")
126
127 return "ES %s %s" % (http_method, http_path)
128
[end of elasticapm/instrumentation/packages/elasticsearch.py]
</code>
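Reading the listing above against the traceback in the issue: `execution_context.get_span()` can hand back a dropped (unsampled) span that never gets a context dict, yet `ElasticsearchConnectionInstrumentation.call` dereferences `span.context` unconditionally. A compressed sketch of the guard idea, written as a standalone wrapper so it can be read in isolation; the import line is the one the accepted diff below adds, while the wrapper itself is only an illustration:

```python
from elasticapm.traces import DroppedSpan, execution_context


def call_with_span_guard(annotated_call, wrapped, *args, **kwargs):
    """Sketch: skip span bookkeeping when there is no usable span."""
    span = execution_context.get_span()
    if span is None or isinstance(span, DroppedSpan):
        # No usable span context: just run the real elasticsearch call.
        return wrapped(*args, **kwargs)
    return annotated_call(span, wrapped, *args, **kwargs)
```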
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/elasticapm/instrumentation/packages/elasticsearch.py b/elasticapm/instrumentation/packages/elasticsearch.py
--- a/elasticapm/instrumentation/packages/elasticsearch.py
+++ b/elasticapm/instrumentation/packages/elasticsearch.py
@@ -34,7 +34,7 @@
import elasticapm
from elasticapm.instrumentation.packages.base import AbstractInstrumentedModule
-from elasticapm.traces import execution_context
+from elasticapm.traces import DroppedSpan, execution_context
from elasticapm.utils.logging import get_logger
logger = get_logger("elasticapm.instrument")
@@ -52,6 +52,8 @@
def call(self, module, method, wrapped, instance, args, kwargs):
span = execution_context.get_span()
+ if isinstance(span, DroppedSpan):
+ return wrapped(*args, **kwargs)
self._update_context_by_request_data(span.context, instance, args, kwargs)
| {"golden_diff": "diff --git a/elasticapm/instrumentation/packages/elasticsearch.py b/elasticapm/instrumentation/packages/elasticsearch.py\n--- a/elasticapm/instrumentation/packages/elasticsearch.py\n+++ b/elasticapm/instrumentation/packages/elasticsearch.py\n@@ -34,7 +34,7 @@\n \n import elasticapm\n from elasticapm.instrumentation.packages.base import AbstractInstrumentedModule\n-from elasticapm.traces import execution_context\n+from elasticapm.traces import DroppedSpan, execution_context\n from elasticapm.utils.logging import get_logger\n \n logger = get_logger(\"elasticapm.instrument\")\n@@ -52,6 +52,8 @@\n \n def call(self, module, method, wrapped, instance, args, kwargs):\n span = execution_context.get_span()\n+ if isinstance(span, DroppedSpan):\n+ return wrapped(*args, **kwargs)\n \n self._update_context_by_request_data(span.context, instance, args, kwargs)\n", "issue": "ElasticAPM crash because of a missing context since version 6.3.0\n**Describe the bug**: \r\n\r\nSince ElasticAPM release 6.3.0 our application has started to see a ton of crashes that we believed are cause by a recent change in ElasticAPM. The elasticsearch instrumentation package is trying to assign the type of DB to elasticsearch into an None context which causes it to crash. \r\n\r\nHere's a snippet of a strack trace from our app:\r\n```\r\n...\r\n File \"/var/lib/assemblyline/.local/lib/python3.7/site-packages/elasticsearch/client/utils.py\", line 168, in _wrapped\r\n return func(*args, params=params, headers=headers, **kwargs)\r\n File \"/var/lib/assemblyline/.local/lib/python3.7/site-packages/elasticsearch/client/__init__.py\", line 1026, in get\r\n \"GET\", _make_path(index, doc_type, id), params=params, headers=headers\r\n File \"/var/lib/assemblyline/.local/lib/python3.7/site-packages/elasticapm/instrumentation/packages/base.py\", line 210, in call_if_sampling\r\n return self.call(module, method, wrapped, instance, args, kwargs)\r\n File \"/var/lib/assemblyline/.local/lib/python3.7/site-packages/elasticapm/instrumentation/packages/elasticsearch.py\", line 113, in call\r\n result_data = wrapped(*args, **kwargs)\r\n File \"/var/lib/assemblyline/.local/lib/python3.7/site-packages/elasticsearch/transport.py\", line 388, in perform_request\r\n timeout=timeout,\r\n File \"/var/lib/assemblyline/.local/lib/python3.7/site-packages/elasticapm/instrumentation/packages/base.py\", line 210, in call_if_sampling\r\n return self.call(module, method, wrapped, instance, args, kwargs)\r\n File \"/var/lib/assemblyline/.local/lib/python3.7/site-packages/elasticapm/instrumentation/packages/elasticsearch.py\", line 56, in call\r\n self._update_context_by_request_data(span.context, instance, args, kwargs)\r\n File \"/var/lib/assemblyline/.local/lib/python3.7/site-packages/elasticapm/instrumentation/packages/elasticsearch.py\", line 72, in _update_context_by_request_data\r\n context[\"db\"] = {\"type\": \"elasticsearch\"}\r\nTypeError: 'NoneType' object does not support item assignment\r\n```\r\n\r\nI believe this crash is related to a recent change in this commit: https://github.com/elastic/apm-agent-python/commit/ee75cb8f238303b79d0f697a7f2eca547a1dfe8c#diff-c8fb731f92134757656c157f5c3175bcb62e131c1fed1aec5041367603c204d0L62\r\n\r\nYou can see here, the context was previously assigned it's DB type in a way where even if the context was None it would still work but now it assumes the context is a dictionary. I'm not creating a PR to fix this because I'm not 100% sure if the old way was changed for a reason. 
\r\n\r\n**Possible fix**\r\nI have very limited understanding on what that context should be before reaching this function but possible fixes include: \r\n\r\n1. Revert to the old way of assigning the DB type.\r\n\r\n2. To test for None context before assigning a type.\r\n```\r\nif context is None:\r\n context = {}\r\n```\r\n\r\n3. or make sure span.context default value is an empty dict instead of None\r\n\r\n**To Reproduce**\r\n\r\nI have no easy way to reproduce this crash because it does not happen all the time. \r\n\r\n**Environment (please complete the following information)**\r\n- OS: Linux\r\n- Python version: 3.7.11\r\n- Framework and version [e.g. Django 2.1]: Flask 2.0.1\r\n- APM Server version: 7.12\r\n- Agent version: 6.3.0+\r\n\r\n**Aditional Information**\r\n\r\nOur app in launched in Gunicorn using gevent workers.\nElasticAPM crash because of a missing context since version 6.3.0\n**Describe the bug**: \r\n\r\nSince ElasticAPM release 6.3.0 our application has started to see a ton of crashes that we believed are cause by a recent change in ElasticAPM. The elasticsearch instrumentation package is trying to assign the type of DB to elasticsearch into an None context which causes it to crash. \r\n\r\nHere's a snippet of a strack trace from our app:\r\n```\r\n...\r\n File \"/var/lib/assemblyline/.local/lib/python3.7/site-packages/elasticsearch/client/utils.py\", line 168, in _wrapped\r\n return func(*args, params=params, headers=headers, **kwargs)\r\n File \"/var/lib/assemblyline/.local/lib/python3.7/site-packages/elasticsearch/client/__init__.py\", line 1026, in get\r\n \"GET\", _make_path(index, doc_type, id), params=params, headers=headers\r\n File \"/var/lib/assemblyline/.local/lib/python3.7/site-packages/elasticapm/instrumentation/packages/base.py\", line 210, in call_if_sampling\r\n return self.call(module, method, wrapped, instance, args, kwargs)\r\n File \"/var/lib/assemblyline/.local/lib/python3.7/site-packages/elasticapm/instrumentation/packages/elasticsearch.py\", line 113, in call\r\n result_data = wrapped(*args, **kwargs)\r\n File \"/var/lib/assemblyline/.local/lib/python3.7/site-packages/elasticsearch/transport.py\", line 388, in perform_request\r\n timeout=timeout,\r\n File \"/var/lib/assemblyline/.local/lib/python3.7/site-packages/elasticapm/instrumentation/packages/base.py\", line 210, in call_if_sampling\r\n return self.call(module, method, wrapped, instance, args, kwargs)\r\n File \"/var/lib/assemblyline/.local/lib/python3.7/site-packages/elasticapm/instrumentation/packages/elasticsearch.py\", line 56, in call\r\n self._update_context_by_request_data(span.context, instance, args, kwargs)\r\n File \"/var/lib/assemblyline/.local/lib/python3.7/site-packages/elasticapm/instrumentation/packages/elasticsearch.py\", line 72, in _update_context_by_request_data\r\n context[\"db\"] = {\"type\": \"elasticsearch\"}\r\nTypeError: 'NoneType' object does not support item assignment\r\n```\r\n\r\nI believe this crash is related to a recent change in this commit: https://github.com/elastic/apm-agent-python/commit/ee75cb8f238303b79d0f697a7f2eca547a1dfe8c#diff-c8fb731f92134757656c157f5c3175bcb62e131c1fed1aec5041367603c204d0L62\r\n\r\nYou can see here, the context was previously assigned it's DB type in a way where even if the context was None it would still work but now it assumes the context is a dictionary. I'm not creating a PR to fix this because I'm not 100% sure if the old way was changed for a reason. 
\r\n\r\n**Possible fix**\r\nI have very limited understanding on what that context should be before reaching this function but possible fixes include: \r\n\r\n1. Revert to the old way of assigning the DB type.\r\n\r\n2. To test for None context before assigning a type.\r\n```\r\nif context is None:\r\n context = {}\r\n```\r\n\r\n3. or make sure span.context default value is an empty dict instead of None\r\n\r\n**To Reproduce**\r\n\r\nI have no easy way to reproduce this crash because it does not happen all the time. \r\n\r\n**Environment (please complete the following information)**\r\n- OS: Linux\r\n- Python version: 3.7.11\r\n- Framework and version [e.g. Django 2.1]: Flask 2.0.1\r\n- APM Server version: 7.12\r\n- Agent version: 6.3.0+\r\n\r\n**Aditional Information**\r\n\r\nOur app in launched in Gunicorn using gevent workers.\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom __future__ import absolute_import\n\nimport re\n\nimport elasticapm\nfrom elasticapm.instrumentation.packages.base import AbstractInstrumentedModule\nfrom elasticapm.traces import execution_context\nfrom elasticapm.utils.logging import get_logger\n\nlogger = get_logger(\"elasticapm.instrument\")\n\nshould_capture_body_re = re.compile(\"/(_search|_msearch|_count|_async_search|_sql|_eql)(/|$)\")\n\n\nclass ElasticsearchConnectionInstrumentation(AbstractInstrumentedModule):\n name = \"elasticsearch_connection\"\n\n instrument_list = [\n (\"elasticsearch.connection.http_urllib3\", \"Urllib3HttpConnection.perform_request\"),\n (\"elasticsearch.connection.http_requests\", \"RequestsHttpConnection.perform_request\"),\n ]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n span = execution_context.get_span()\n\n self._update_context_by_request_data(span.context, instance, args, kwargs)\n\n status_code, headers, raw_data = wrapped(*args, **kwargs)\n\n span.context[\"http\"] = {\"status_code\": status_code}\n\n return status_code, headers, raw_data\n\n def _update_context_by_request_data(self, context, instance, args, kwargs):\n args_len = len(args)\n url = args[1] if args_len > 1 else kwargs.get(\"url\")\n params = args[2] if args_len > 2 else kwargs.get(\"params\")\n body_serialized = args[3] if args_len > 3 else kwargs.get(\"body\")\n\n should_capture_body = bool(should_capture_body_re.search(url))\n\n context[\"db\"] = {\"type\": \"elasticsearch\"}\n if should_capture_body:\n query = []\n # using both q AND body is allowed in some API endpoints / ES versions,\n # but not in others. 
We simply capture both if they are there so the\n # user can see it.\n if params and \"q\" in params:\n # 'q' is already encoded to a byte string at this point\n # we assume utf8, which is the default\n query.append(\"q=\" + params[\"q\"].decode(\"utf-8\", errors=\"replace\"))\n if body_serialized:\n if isinstance(body_serialized, bytes):\n query.append(body_serialized.decode(\"utf-8\", errors=\"replace\"))\n else:\n query.append(body_serialized)\n if query:\n context[\"db\"][\"statement\"] = \"\\n\\n\".join(query)\n\n context[\"destination\"] = {\n \"address\": instance.host,\n \"service\": {\"name\": \"elasticsearch\", \"resource\": \"elasticsearch\", \"type\": \"db\"},\n }\n\n\nclass ElasticsearchTransportInstrumentation(AbstractInstrumentedModule):\n name = \"elasticsearch_connection\"\n\n instrument_list = [\n (\"elasticsearch.transport\", \"Transport.perform_request\"),\n ]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n with elasticapm.capture_span(\n self._get_signature(args, kwargs),\n span_type=\"db\",\n span_subtype=\"elasticsearch\",\n span_action=\"query\",\n extra={},\n skip_frames=2,\n leaf=True,\n ) as span:\n result_data = wrapped(*args, **kwargs)\n\n try:\n span.context[\"db\"][\"rows_affected\"] = result_data[\"hits\"][\"total\"][\"value\"]\n except (KeyError, TypeError):\n pass\n\n return result_data\n\n def _get_signature(self, args, kwargs):\n args_len = len(args)\n http_method = args[0] if args_len else kwargs.get(\"method\")\n http_path = args[1] if args_len > 1 else kwargs.get(\"url\")\n\n return \"ES %s %s\" % (http_method, http_path)\n", "path": "elasticapm/instrumentation/packages/elasticsearch.py"}]} | 3,810 | 210 |
gh_patches_debug_24065 | rasdani/github-patches | git_diff | opendatacube__datacube-core-1558 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Regression with new database connection handling/transaction isolation
### Background
Digital Earth Australia provides a Python Environment on the NCI which includes ODC. It's currently running the very old version 1.8.8 under Python 3.8.13 and we want to upgrade to the latest version. We've created a new environment using ODC core version 1.8.17 running under Python 3.10.8, but in testing we've been getting errors which weren't occurring before.
This issue was reported and discussed internally in [Jira Issue DSNS-304](https://gajira.atlassian.net/browse/DSNS-304), and included code to reproduce the error.
### Error
The problem was reported as an unhandled PostgreSQL exception being thrown in a production script, which used to work cleanly.
<details>
<summary>Original code and stack trace from error</summary>
When executing the following script with the new `module load dea/20231204`
```python
#!/usr/bin/env python3
import datacube
message = """Will this work? Will this cause the new dea module to error out?"""
def main(product="ga_ls8c_ard_3"):
dc = datacube.Datacube(app="gen-list")
for l1_dataset in dc.index.datasets.search(product=product):
with open('io.txt', 'a') as file:
file.write(message + '\n')
if __name__ == "__main__":
main(product="ga_s2am_ard_3")
```
**Stack Trace**
```
Traceback (most recent call last):
File "/g/data/u46/users/dsg547/sandbox/ard_pipeline_support/tickets/DSNS-304/example/./odc_call.py", line 18, in <module>
main(product="ga_s2am_ard_3")
File "/g/data/u46/users/dsg547/sandbox/ard_pipeline_support/tickets/DSNS-304/example/./odc_call.py", line 14, in main
for l1_dataset in dc.index.datasets.search(product=product):
File "/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/datacube/index/postgres/_datasets.py", line 548, in search
for product, datasets in self._do_search_by_product(query,
File "/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/datacube/index/postgres/_datasets.py", line 691, in _do_search_by_product
with self._db_connection() as connection:
File "/g/data/v10/public/modules/dea/20231204/lib/python3.10/contextlib.py", line 142, in __exit__
next(self.gen)
File "/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/datacube/index/postgres/_transaction.py", line 60, in _db_connection
with self._index._active_connection(transaction=transaction) as conn:
File "/g/data/v10/public/modules/dea/20231204/lib/python3.10/contextlib.py", line 142, in __exit__
next(self.gen)
File "/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/datacube/index/postgres/index.py", line 157, in _active_connection
with self._db._connect() as conn:
File "/g/data/v10/public/modules/dea/20231204/lib/python3.10/contextlib.py", line 142, in __exit__
next(self.gen)
File "/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/datacube/drivers/postgres/_connections.py", line 231, in _connect
connection.close()
File "/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/sqlalchemy/engine/base.py", line 1251, in close
conn.close()
File "/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/sqlalchemy/pool/base.py", line 1166, in close
self._checkin()
File "/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/sqlalchemy/pool/base.py", line 1008, in _checkin
_finalize_fairy(
File "/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/sqlalchemy/pool/base.py", line 800, in _finalize_fairy
connection_record.checkin()
File "/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/sqlalchemy/pool/base.py", line 542, in checkin
finalizer(connection)
File "/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/sqlalchemy/engine/default.py", line 677, in _reset_characteristics
characteristic.reset_characteristic(self, dbapi_connection)
File "/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/sqlalchemy/engine/characteristics.py", line 50, in reset_characteristic
dialect.reset_isolation_level(dbapi_conn)
File "/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/sqlalchemy/engine/default.py", line 748, in reset_isolation_level
self.set_isolation_level(dbapi_conn, self.default_isolation_level)
File "/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/sqlalchemy/dialects/postgresql/psycopg2.py", line 851, in set_isolation_level
connection.set_isolation_level(level)
psycopg2.OperationalError: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
```
</details>
I believe @Ariana-B did some investigation resulting in https://github.com/opendatacube/datacube-core/pull/1525 , especially in relation to the `connection.set_isolation_level()` changes, but it hasn't yet been resolved.
### New Findings
I've run some more tests today, and have more of an idea of what the issue is.
- The network configuration at the NCI drops idle TCP connections after 5 minutes. This is extremely short; the normal default is 2 hours.
- The above code first loads 4Gb of database results into memory (about 3 minutes), then slowly writes lines to disk IO while iterating over DB results (more than 10 minutes for the ~500k results).
- Under ODC 1.8.8, the script then exits cleanly.
- Under ODC 1.8.17, something attempts to use the db connection to change the _isolation level_. That fails because the network connection was idle for more than 5 minutes.
I'm not sure whether the change is ODC or SQLAlchemy or psycopg2, but I suspect it's ODC.
I'm about to write up a proposed improvement in a new issue, to allow passing arbitrary connection parameters to `libpq`, which will allow working around the TCP idle timeout issue here.
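
A minimal sketch of what I mean (illustrative only, not part of the current ODC API — the DSN and option names here are placeholders): libpq's TCP keepalive settings can be supplied through SQLAlchemy's `connect_args`, which psycopg2 forwards to libpq, so the client starts probing well before the NCI's 5-minute idle cutoff.

```python
# Illustrative sketch only: enabling client-side TCP keepalives via libpq
# parameters. ODC does not expose these options today; the DSN is hypothetical.
from sqlalchemy import create_engine

engine = create_engine(
    "postgresql+psycopg2://user@dbhost/datacube",  # hypothetical connection URL
    connect_args={
        "keepalives": 1,            # turn TCP keepalives on
        "keepalives_idle": 120,     # start probing after 2 idle minutes
        "keepalives_interval": 30,  # probe every 30 seconds
        "keepalives_count": 3,      # give up after 3 failed probes
    },
)
```

The same keys can equally be appended to the PostgreSQL connection URL as query parameters, which is roughly what a pass-through of arbitrary `libpq` parameters would enable.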
Deprecation Warning on `pkg_resources` in 1.9 branch
When running from the `develop-1.9` branch with Python 3.10 I'm getting unwanted warning messages.
```
/home/547/dra547/.dea-sandbox/dea/20231204/local/lib/python3.10/site-packages/datacube/drivers/driver_cache.py:54: DeprecationWarning:
pkg_resources is deprecated as an API. See https://setuptools.pypa.io/en/latest/pkg_resources.html
from pkg_resources import iter_entry_points
```
This was fixed a while ago in #1464 in the `develop` branch and subsequent releases, but has snuck back into 1.9.
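
As an aside (an illustrative sketch, not text from the original report): the deprecated `pkg_resources.iter_entry_points` call has a close stdlib replacement in `importlib.metadata` — or the `importlib_metadata` backport on interpreters older than 3.10 — along these lines:

```python
# Sketch of the replacement API; entry_points(group=...) needs Python 3.10+
# (use the importlib_metadata backport otherwise). The group name is the
# example given in the driver_cache docstring below.
from importlib.metadata import entry_points

for ep in entry_points(group="datacube.plugins.io.read"):
    driver = ep.load()
```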
</issue>
<code>
[start of datacube/drivers/driver_cache.py]
1 # This file is part of the Open Data Cube, see https://opendatacube.org for more information
2 #
3 # Copyright (c) 2015-2024 ODC Contributors
4 # SPDX-License-Identifier: Apache-2.0
5 import logging
6 from typing import Dict, Any, Tuple, Iterable
7
8 _LOG = logging.getLogger(__name__)
9
10
11 def load_drivers(group: str) -> Dict[str, Any]:
12 """
13 Load available drivers for a given group name.
14
15 Gracefully handles:
16
17 - Driver module not able to be imported
18 - Driver init function throwing an exception or returning None
19
20 By having driver entry_points pointing to a function, we defer loading the driver
21 module or running any code until required.
22
23 :param group: Name of the entry point group e.g. "datacube.plugins.io.read"
24
25 :returns: Dictionary String -> Driver Object
26 """
27
28 def safe_load(ep):
29 from pkg_resources import DistributionNotFound
30 # pylint: disable=broad-except,bare-except
31 try:
32 driver_init = ep.load()
33 except DistributionNotFound:
34 # This happens when entry points were marked with extra features,
35 # but extra feature were not requested for installation
36 return None
37 except Exception as e:
38 _LOG.warning('Failed to resolve driver %s::%s', group, ep.name)
39 _LOG.warning('Error was: %s', repr(e))
40 return None
41
42 try:
43 driver = driver_init()
44 except Exception:
45 _LOG.warning('Exception during driver init, driver name: %s::%s', group, ep.name)
46 return None
47
48 if driver is None:
49 _LOG.warning('Driver init returned None, driver name: %s::%s', group, ep.name)
50
51 return driver
52
53 def resolve_all(group: str) -> Iterable[Tuple[str, Any]]:
54 from pkg_resources import iter_entry_points
55 for ep in iter_entry_points(group=group, name=None):
56 driver = safe_load(ep)
57 if driver is not None:
58 yield (ep.name, driver)
59
60 return dict((name, driver) for name, driver in resolve_all(group))
61
[end of datacube/drivers/driver_cache.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/datacube/drivers/driver_cache.py b/datacube/drivers/driver_cache.py
--- a/datacube/drivers/driver_cache.py
+++ b/datacube/drivers/driver_cache.py
@@ -26,14 +26,9 @@
"""
def safe_load(ep):
- from pkg_resources import DistributionNotFound
# pylint: disable=broad-except,bare-except
try:
driver_init = ep.load()
- except DistributionNotFound:
- # This happens when entry points were marked with extra features,
- # but extra feature were not requested for installation
- return None
except Exception as e:
_LOG.warning('Failed to resolve driver %s::%s', group, ep.name)
_LOG.warning('Error was: %s', repr(e))
@@ -51,8 +46,8 @@
return driver
def resolve_all(group: str) -> Iterable[Tuple[str, Any]]:
- from pkg_resources import iter_entry_points
- for ep in iter_entry_points(group=group, name=None):
+ from importlib_metadata import entry_points
+ for ep in entry_points(group=group):
driver = safe_load(ep)
if driver is not None:
yield (ep.name, driver)
| {"golden_diff": "diff --git a/datacube/drivers/driver_cache.py b/datacube/drivers/driver_cache.py\n--- a/datacube/drivers/driver_cache.py\n+++ b/datacube/drivers/driver_cache.py\n@@ -26,14 +26,9 @@\n \"\"\"\n \n def safe_load(ep):\n- from pkg_resources import DistributionNotFound\n # pylint: disable=broad-except,bare-except\n try:\n driver_init = ep.load()\n- except DistributionNotFound:\n- # This happens when entry points were marked with extra features,\n- # but extra feature were not requested for installation\n- return None\n except Exception as e:\n _LOG.warning('Failed to resolve driver %s::%s', group, ep.name)\n _LOG.warning('Error was: %s', repr(e))\n@@ -51,8 +46,8 @@\n return driver\n \n def resolve_all(group: str) -> Iterable[Tuple[str, Any]]:\n- from pkg_resources import iter_entry_points\n- for ep in iter_entry_points(group=group, name=None):\n+ from importlib_metadata import entry_points\n+ for ep in entry_points(group=group):\n driver = safe_load(ep)\n if driver is not None:\n yield (ep.name, driver)\n", "issue": "Regression with new database connection handling/transaction isolation\n### Background \r\nDigital Earth Australia provides a Python Environment on the NCI which includes ODC. It's currently running the very old version 1.8.8 under Python 3.8.13 and we want to upgrade to the latest version. We've created a new environment using ODC core version 1.8.17 running under Python 3.10.8, but in testing we've been getting errors which weren't occuring before.\r\n\r\nThis issue was reported and discussed internally in [Jira Issue DSNS-304](https://gajira.atlassian.net/browse/DSNS-304), and included code to reproduce the error.\r\n\r\n### Error\r\n\r\nThe problem was reported as an unhandled PostgreSQL exception being thrown in a production script, which used to work cleanly.\r\n\r\n<details>\r\n<summary>Original code and stack trace from error</summary>\r\n\r\nWhen executing the following script with the new `module load dea/20231204`\r\n\r\n```python\r\n#!/usr/bin/env python3\r\n\r\nimport datacube\r\n\r\nmessage = \"\"\"Will this work? 
Will this cause the new dea module to error out?\"\"\"\r\n\r\ndef main(product=\"ga_ls8c_ard_3\"):\r\n dc = datacube.Datacube(app=\"gen-list\")\r\n\r\n for l1_dataset in dc.index.datasets.search(product=product):\r\n with open('io.txt', 'a') as file:\r\n file.write(message + '\\n')\r\n\r\nif __name__ == \"__main__\":\r\n main(product=\"ga_s2am_ard_3\")\r\n```\r\n**Stack Trace**\r\n```\r\nTraceback (most recent call last):\r\n File \"/g/data/u46/users/dsg547/sandbox/ard_pipeline_support/tickets/DSNS-304/example/./odc_call.py\", line 18, in <module>\r\n main(product=\"ga_s2am_ard_3\")\r\n File \"/g/data/u46/users/dsg547/sandbox/ard_pipeline_support/tickets/DSNS-304/example/./odc_call.py\", line 14, in main\r\n for l1_dataset in dc.index.datasets.search(product=product):\r\n File \"/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/datacube/index/postgres/_datasets.py\", line 548, in search\r\n for product, datasets in self._do_search_by_product(query,\r\n File \"/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/datacube/index/postgres/_datasets.py\", line 691, in _do_search_by_product\r\n with self._db_connection() as connection:\r\n File \"/g/data/v10/public/modules/dea/20231204/lib/python3.10/contextlib.py\", line 142, in __exit__\r\n next(self.gen)\r\n File \"/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/datacube/index/postgres/_transaction.py\", line 60, in _db_connection\r\n with self._index._active_connection(transaction=transaction) as conn:\r\n File \"/g/data/v10/public/modules/dea/20231204/lib/python3.10/contextlib.py\", line 142, in __exit__\r\n next(self.gen)\r\n File \"/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/datacube/index/postgres/index.py\", line 157, in _active_connection\r\n with self._db._connect() as conn:\r\n File \"/g/data/v10/public/modules/dea/20231204/lib/python3.10/contextlib.py\", line 142, in __exit__\r\n next(self.gen)\r\n File \"/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/datacube/drivers/postgres/_connections.py\", line 231, in _connect\r\n connection.close()\r\n File \"/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/sqlalchemy/engine/base.py\", line 1251, in close\r\n conn.close()\r\n File \"/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/sqlalchemy/pool/base.py\", line 1166, in close\r\n self._checkin()\r\n File \"/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/sqlalchemy/pool/base.py\", line 1008, in _checkin\r\n _finalize_fairy(\r\n File \"/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/sqlalchemy/pool/base.py\", line 800, in _finalize_fairy\r\n connection_record.checkin()\r\n File \"/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/sqlalchemy/pool/base.py\", line 542, in checkin\r\n finalizer(connection)\r\n File \"/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/sqlalchemy/engine/default.py\", line 677, in _reset_characteristics\r\n characteristic.reset_characteristic(self, dbapi_connection)\r\n File \"/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/sqlalchemy/engine/characteristics.py\", line 50, in reset_characteristic\r\n dialect.reset_isolation_level(dbapi_conn)\r\n File \"/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/sqlalchemy/engine/default.py\", line 748, in reset_isolation_level\r\n self.set_isolation_level(dbapi_conn, self.default_isolation_level)\r\n File 
\"/g/data/v10/public/modules/dea/20231204/lib/python3.10/site-packages/sqlalchemy/dialects/postgresql/psycopg2.py\", line 851, in set_isolation_level\r\n connection.set_isolation_level(level)\r\npsycopg2.OperationalError: server closed the connection unexpectedly\r\n This probably means the server terminated abnormally\r\n before or while processing the request.\r\n```\r\n\r\n</details>\r\n\r\n\r\nI believe @Ariana-B did some investigation resulting in https://github.com/opendatacube/datacube-core/pull/1525 , especially in relation to the `connection.set_isolation_level()` changes, but it hasn't yet been resolved.\r\n\r\n### New Findings\r\n\r\nI've run some more tests today, and have more of an idea of what the issue is.\r\n\r\n- The network configuration at the NCI drops idle TCP connections after 5 minutes. This is extremely short, the normal default is 2 hours.\r\n- The above code first loads 4Gb of database results into memory (about 3 minutes), then slowly writes lines to disk IO while iterating over DB results (more than 10 minutes for the ~500k results).\r\n- Under ODC 1.8.8, the script then exits cleanly.\r\n- Under ODC 1.8.17, something attempts to use the db connection to change the _isolation level_. That fails because the network connection was idle for more than 5 minutes.\r\n\r\nI'm not sure whether the change is ODC or SQLAlchemy or psycopg2, but I suspect it's ODC.\r\n\r\nI'm about to write up a proposed improvement in a new issue, to allow passing arbitrary connection parameters to `libpq`, which will allow working around the TCP idle timeout issue here.\nDeprecation Warning on `pkg_resources` in 1.9 branch\nWhen running from the `develop-1.9` branch with Python 3.10 I'm getting unwanted warning messages.\r\n\r\n```\r\n/home/547/dra547/.dea-sandbox/dea/20231204/local/lib/python3.10/site-packages/datacube/drivers/driver_cache.py:54: DeprecationWarning: \r\npkg_resources is deprecated as an API. See https://setuptools.pypa.io/en/latest/pkg_resources.html\r\n from pkg_resources import iter_entry_points\r\n```\r\n\r\nThis was fixed a while ago in #1464 in the `develop` branch and subsequent releases, but has snuck back into 1.9.\r\n\r\n\n", "before_files": [{"content": "# This file is part of the Open Data Cube, see https://opendatacube.org for more information\n#\n# Copyright (c) 2015-2024 ODC Contributors\n# SPDX-License-Identifier: Apache-2.0\nimport logging\nfrom typing import Dict, Any, Tuple, Iterable\n\n_LOG = logging.getLogger(__name__)\n\n\ndef load_drivers(group: str) -> Dict[str, Any]:\n \"\"\"\n Load available drivers for a given group name.\n\n Gracefully handles:\n\n - Driver module not able to be imported\n - Driver init function throwing an exception or returning None\n\n By having driver entry_points pointing to a function, we defer loading the driver\n module or running any code until required.\n\n :param group: Name of the entry point group e.g. 
\"datacube.plugins.io.read\"\n\n :returns: Dictionary String -> Driver Object\n \"\"\"\n\n def safe_load(ep):\n from pkg_resources import DistributionNotFound\n # pylint: disable=broad-except,bare-except\n try:\n driver_init = ep.load()\n except DistributionNotFound:\n # This happens when entry points were marked with extra features,\n # but extra feature were not requested for installation\n return None\n except Exception as e:\n _LOG.warning('Failed to resolve driver %s::%s', group, ep.name)\n _LOG.warning('Error was: %s', repr(e))\n return None\n\n try:\n driver = driver_init()\n except Exception:\n _LOG.warning('Exception during driver init, driver name: %s::%s', group, ep.name)\n return None\n\n if driver is None:\n _LOG.warning('Driver init returned None, driver name: %s::%s', group, ep.name)\n\n return driver\n\n def resolve_all(group: str) -> Iterable[Tuple[str, Any]]:\n from pkg_resources import iter_entry_points\n for ep in iter_entry_points(group=group, name=None):\n driver = safe_load(ep)\n if driver is not None:\n yield (ep.name, driver)\n\n return dict((name, driver) for name, driver in resolve_all(group))\n", "path": "datacube/drivers/driver_cache.py"}]} | 3,001 | 272 |
gh_patches_debug_17838 | rasdani/github-patches | git_diff | voxel51__fiftyone-1283 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[FR] update opencv-python-headless
### Proposal Summary
Currently this repo requires opencv-python-headless<=4.4.0.46. To cut a long story short, there are no wheels of that version available for Python 3.9, and I am unable to install fiftyone (I am using docker `image: jupyter/scipy-notebook:latest`). However, version `4.5.3.56` is available for install without issue, and I propose updating the requirement for this dependency.
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 """
3 Installs FiftyOne.
4
5 | Copyright 2017-2021, Voxel51, Inc.
6 | `voxel51.com <https://voxel51.com/>`_
7 |
8 """
9 import os
10 from setuptools import setup, find_packages
11 from wheel.bdist_wheel import bdist_wheel
12
13
14 class BdistWheelCustom(bdist_wheel):
15 def finalize_options(self):
16 bdist_wheel.finalize_options(self)
17 # make just the wheel require these packages, since they aren't needed
18 # for a development installation
19 self.distribution.install_requires += [
20 "fiftyone-brain>=0.7,<0.8",
21 "fiftyone-db>=0.3,<0.4",
22 ]
23
24
25 VERSION = "0.13.2"
26
27
28 def get_version():
29 if "RELEASE_VERSION" in os.environ:
30 version = os.environ["RELEASE_VERSION"]
31 if not version.startswith(VERSION):
32 raise ValueError(
33 "Release version does not match version: %s and %s"
34 % (version, VERSION)
35 )
36 return version
37
38 return VERSION
39
40
41 EXTRAS_REQUIREMENTS = {"desktop": ["fiftyone-desktop>=0.16,<0.17"]}
42
43
44 with open("README.md", "r") as fh:
45 long_description = fh.read()
46
47
48 setup(
49 name="fiftyone",
50 version=get_version(),
51 description=(
52 "FiftyOne: the open-source tool for building high-quality datasets "
53 "and computer vision models"
54 ),
55 author="Voxel51, Inc.",
56 author_email="[email protected]",
57 url="https://github.com/voxel51/fiftyone",
58 extras_require=EXTRAS_REQUIREMENTS,
59 license="Apache",
60 long_description=long_description,
61 long_description_content_type="text/markdown",
62 packages=find_packages() + ["fiftyone.recipes", "fiftyone.tutorials"],
63 package_dir={
64 "fiftyone.recipes": "docs/source/recipes",
65 "fiftyone.tutorials": "docs/source/tutorials",
66 },
67 include_package_data=True,
68 install_requires=[
69 # third-party packages
70 "argcomplete",
71 "boto3",
72 "Deprecated",
73 "eventlet",
74 "future",
75 "Jinja2",
76 "kaleido",
77 "matplotlib",
78 "mongoengine==0.20.0",
79 "motor>=2.3,<3",
80 "numpy",
81 "packaging",
82 "pandas",
83 "Pillow>=6.2",
84 "plotly>=4.14,<5",
85 "pprintpp",
86 "psutil",
87 "pymongo>=3.11,<4",
88 "PyYAML",
89 "retrying",
90 "scikit-learn",
91 "scikit-image",
92 "setuptools",
93 "tabulate",
94 "tornado>=5.1.1,<7",
95 "xmltodict",
96 "universal-analytics-python3>=1.0.1,<2",
97 # internal packages
98 "voxel51-eta>=0.5.2,<0.6",
99 # ETA dependency - restricted to a maximum version known to provide
100 # wheels here because it tends to publish sdists several hours before
101 # wheels. When users install FiftyOne in this window, they will need to
102 # compile OpenCV from source, leading to either errors or a
103 # time-consuming installation.
104 "opencv-python-headless<=4.4.0.46",
105 ],
106 classifiers=[
107 "Development Status :: 4 - Beta",
108 "Intended Audience :: Developers",
109 "Intended Audience :: Science/Research",
110 "License :: OSI Approved :: Apache Software License",
111 "Topic :: Scientific/Engineering :: Artificial Intelligence",
112 "Topic :: Scientific/Engineering :: Image Processing",
113 "Topic :: Scientific/Engineering :: Image Recognition",
114 "Topic :: Scientific/Engineering :: Information Analysis",
115 "Topic :: Scientific/Engineering :: Visualization",
116 "Operating System :: MacOS :: MacOS X",
117 "Operating System :: POSIX :: Linux",
118 "Operating System :: Microsoft :: Windows",
119 "Programming Language :: Python :: 3",
120 "Programming Language :: Python :: 3.6",
121 "Programming Language :: Python :: 3.7",
122 "Programming Language :: Python :: 3.8",
123 "Programming Language :: Python :: 3.9",
124 ],
125 entry_points={"console_scripts": ["fiftyone=fiftyone.core.cli:main"]},
126 python_requires=">=3.6",
127 cmdclass={"bdist_wheel": BdistWheelCustom},
128 )
129
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -78,6 +78,7 @@
"mongoengine==0.20.0",
"motor>=2.3,<3",
"numpy",
+ "opencv-python-headless",
"packaging",
"pandas",
"Pillow>=6.2",
@@ -96,12 +97,6 @@
"universal-analytics-python3>=1.0.1,<2",
# internal packages
"voxel51-eta>=0.5.2,<0.6",
- # ETA dependency - restricted to a maximum version known to provide
- # wheels here because it tends to publish sdists several hours before
- # wheels. When users install FiftyOne in this window, they will need to
- # compile OpenCV from source, leading to either errors or a
- # time-consuming installation.
- "opencv-python-headless<=4.4.0.46",
],
classifiers=[
"Development Status :: 4 - Beta",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -78,6 +78,7 @@\n \"mongoengine==0.20.0\",\n \"motor>=2.3,<3\",\n \"numpy\",\n+ \"opencv-python-headless\",\n \"packaging\",\n \"pandas\",\n \"Pillow>=6.2\",\n@@ -96,12 +97,6 @@\n \"universal-analytics-python3>=1.0.1,<2\",\n # internal packages\n \"voxel51-eta>=0.5.2,<0.6\",\n- # ETA dependency - restricted to a maximum version known to provide\n- # wheels here because it tends to publish sdists several hours before\n- # wheels. When users install FiftyOne in this window, they will need to\n- # compile OpenCV from source, leading to either errors or a\n- # time-consuming installation.\n- \"opencv-python-headless<=4.4.0.46\",\n ],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n", "issue": "[FR] update opencv-python-headless\n### Proposal Summary\r\nCurrently this repo requires opencv-python-headless<=4.4.0.46. To cut a long story short there are no wheels available for python3.9 and I am unable to install fiftyone (I am using docker `image: jupyter/scipy-notebook:latest`). However version `4.5.3.56` is available for install without issue, and I propose updating the requirement for this dependency.\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nInstalls FiftyOne.\n\n| Copyright 2017-2021, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\nimport os\nfrom setuptools import setup, find_packages\nfrom wheel.bdist_wheel import bdist_wheel\n\n\nclass BdistWheelCustom(bdist_wheel):\n def finalize_options(self):\n bdist_wheel.finalize_options(self)\n # make just the wheel require these packages, since they aren't needed\n # for a development installation\n self.distribution.install_requires += [\n \"fiftyone-brain>=0.7,<0.8\",\n \"fiftyone-db>=0.3,<0.4\",\n ]\n\n\nVERSION = \"0.13.2\"\n\n\ndef get_version():\n if \"RELEASE_VERSION\" in os.environ:\n version = os.environ[\"RELEASE_VERSION\"]\n if not version.startswith(VERSION):\n raise ValueError(\n \"Release version does not match version: %s and %s\"\n % (version, VERSION)\n )\n return version\n\n return VERSION\n\n\nEXTRAS_REQUIREMENTS = {\"desktop\": [\"fiftyone-desktop>=0.16,<0.17\"]}\n\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\n\nsetup(\n name=\"fiftyone\",\n version=get_version(),\n description=(\n \"FiftyOne: the open-source tool for building high-quality datasets \"\n \"and computer vision models\"\n ),\n author=\"Voxel51, Inc.\",\n author_email=\"[email protected]\",\n url=\"https://github.com/voxel51/fiftyone\",\n extras_require=EXTRAS_REQUIREMENTS,\n license=\"Apache\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n packages=find_packages() + [\"fiftyone.recipes\", \"fiftyone.tutorials\"],\n package_dir={\n \"fiftyone.recipes\": \"docs/source/recipes\",\n \"fiftyone.tutorials\": \"docs/source/tutorials\",\n },\n include_package_data=True,\n install_requires=[\n # third-party packages\n \"argcomplete\",\n \"boto3\",\n \"Deprecated\",\n \"eventlet\",\n \"future\",\n \"Jinja2\",\n \"kaleido\",\n \"matplotlib\",\n \"mongoengine==0.20.0\",\n \"motor>=2.3,<3\",\n \"numpy\",\n \"packaging\",\n \"pandas\",\n \"Pillow>=6.2\",\n \"plotly>=4.14,<5\",\n \"pprintpp\",\n \"psutil\",\n \"pymongo>=3.11,<4\",\n \"PyYAML\",\n \"retrying\",\n \"scikit-learn\",\n \"scikit-image\",\n \"setuptools\",\n \"tabulate\",\n \"tornado>=5.1.1,<7\",\n \"xmltodict\",\n \"universal-analytics-python3>=1.0.1,<2\",\n # internal packages\n \"voxel51-eta>=0.5.2,<0.6\",\n # 
ETA dependency - restricted to a maximum version known to provide\n # wheels here because it tends to publish sdists several hours before\n # wheels. When users install FiftyOne in this window, they will need to\n # compile OpenCV from source, leading to either errors or a\n # time-consuming installation.\n \"opencv-python-headless<=4.4.0.46\",\n ],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Scientific/Engineering :: Image Processing\",\n \"Topic :: Scientific/Engineering :: Image Recognition\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n \"Topic :: Scientific/Engineering :: Visualization\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n ],\n entry_points={\"console_scripts\": [\"fiftyone=fiftyone.core.cli:main\"]},\n python_requires=\">=3.6\",\n cmdclass={\"bdist_wheel\": BdistWheelCustom},\n)\n", "path": "setup.py"}]} | 1,916 | 242 |
gh_patches_debug_29558 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-3795 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[tensor] fix some unittests
[tensor] fix some unittests
[tensor] fix some unittests
[BUG]: booster API docstring is misaligned
### 🐛 Describe the bug
The example should follow markdown format to be correctly rendered on the documentation website.
### Environment
_No response_
</issue>
<code>
[start of colossalai/booster/booster.py]
1 import warnings
2 from contextlib import contextmanager
3 from typing import Callable, Iterator, List, Optional, Tuple, Union
4
5 import torch
6 import torch.nn as nn
7 from torch.optim import Optimizer
8 from torch.optim.lr_scheduler import _LRScheduler as LRScheduler
9 from torch.utils.data import DataLoader
10
11 from colossalai.checkpoint_io import GeneralCheckpointIO
12
13 from .accelerator import Accelerator
14 from .mixed_precision import MixedPrecision, mixed_precision_factory
15 from .plugin import Plugin
16
17 __all__ = ['Booster']
18
19
20 class Booster:
21 """
22 Booster is a high-level API for training neural networks. It provides a unified interface for
23 training with different precision, accelerator, and plugin.
24
25 Examples:
26 >>> colossalai.launch(...)
27 >>> plugin = GeminiPlugin(stage=3, ...)
28 >>> booster = Booster(precision='fp16', plugin=plugin)
29 >>>
30 >>> model = GPT2()
31 >>> optimizer = Adam(model.parameters())
32 >>> dataloader = Dataloader(Dataset)
33 >>> lr_scheduler = LinearWarmupScheduler()
34 >>> criterion = GPTLMLoss()
35 >>>
36 >>> model, optimizer, lr_scheduler, dataloader = booster.boost(model, optimizer, lr_scheduler, dataloader)
37 >>>
38 >>> for epoch in range(max_epochs):
39 >>> for input_ids, attention_mask in dataloader:
40 >>> outputs = model(input_ids, attention_mask)
41 >>> loss = criterion(outputs.logits, input_ids)
42 >>> booster.backward(loss, optimizer)
43 >>> optimizer.step()
44 >>> lr_scheduler.step()
45 >>> optimizer.zero_grad()
46
47
48 Args:
49 device (str or torch.device): The device to run the training. Default: 'cuda'.
50 mixed_precision (str or MixedPrecision): The mixed precision to run the training. Default: None.
51 If the argument is a string, it can be 'fp16', 'fp16_apex', 'bf16', or 'fp8'.
52 'fp16' would use PyTorch AMP while `fp16_apex` would use Nvidia Apex.
53 plugin (Plugin): The plugin to run the training. Default: None.
54 """
55
56 def __init__(self,
57 device: str = 'cuda',
58 mixed_precision: Union[MixedPrecision, str] = None,
59 plugin: Optional[Plugin] = None) -> None:
60 if plugin is not None:
61 assert isinstance(
62 plugin, Plugin), f'Expected the argument plugin to be an instance of Plugin, but got {type(plugin)}.'
63 self.plugin = plugin
64
65 # set accelerator
66 if self.plugin and self.plugin.control_device():
67 self.accelerator = None
68 warnings.warn('The plugin will control the accelerator, so the device argument will be ignored.')
69 else:
70 self.accelerator = Accelerator(device)
71
72 # set precision
73 if self.plugin and self.plugin.control_precision():
74 warnings.warn('The plugin will control the precision, so the mixed_precision argument will be ignored.')
75 self.mixed_precision = None
76 elif mixed_precision is None:
77 self.mixed_precision = None
78 else:
79 # validate and set precision
80 if isinstance(mixed_precision, str):
81 # the user will take the default arguments for amp training
82 self.mixed_precision = mixed_precision_factory(mixed_precision)
83 elif isinstance(mixed_precision, MixedPrecision):
84 # the user can customize the arguments by passing the precision object
85 self.mixed_precision = mixed_precision
86 else:
87 raise ValueError(
88 f'Expected the argument mixed_precision to be a string or an instance of Precision, but got {type(mixed_precision)}.'
89 )
90
91 if self.plugin is not None and self.plugin.control_checkpoint_io():
92 self.checkpoint_io = self.plugin.get_checkpoint_io()
93 else:
94 self.checkpoint_io = GeneralCheckpointIO()
95
96 def boost(
97 self,
98 model: nn.Module,
99 optimizer: Optimizer,
100 criterion: Callable = None,
101 dataloader: DataLoader = None,
102 lr_scheduler: LRScheduler = None,
103 ) -> List[Union[nn.Module, Optimizer, LRScheduler, DataLoader]]:
104 """
105 Boost the model, optimizer, criterion, lr_scheduler, and dataloader.
106
107 Args:
108 model (nn.Module): The model to be boosted.
109 optimizer (Optimizer): The optimizer to be boosted.
110 criterion (Callable): The criterion to be boosted.
111 dataloader (DataLoader): The dataloader to be boosted.
112 lr_scheduler (LRScheduler): The lr_scheduler to be boosted.
113 """
114 # TODO(FrankLeeeee): consider multi-model and multi-optimizer case
115 # TODO(FrankLeeeee): consider multi-dataloader case
116 # transform model for mixed precision
117 if self.plugin:
118 model, optimizer, criterion, dataloader, lr_scheduler = self.plugin.configure(
119 model, optimizer, criterion, dataloader, lr_scheduler)
120
121 if self.plugin and not self.plugin.control_device():
122 # transform model for accelerator
123 model = self.accelerator.configure(model)
124
125 if self.mixed_precision and (self.plugin is None or self.plugin and not self.plugin.control_precision()):
126 # transform model for mixed precision
127 # when mixed_precision is specified and the plugin is not given or does not control the precision
128 model, optimizer, criterion = self.mixed_precision.configure(model, optimizer, criterion)
129
130 return model, optimizer, criterion, dataloader, lr_scheduler
131
132 def backward(self, loss: torch.Tensor, optimizer: Optimizer) -> None:
133 """Backward pass.
134
135 Args:
136 loss (torch.Tensor): The loss to be backpropagated.
137 optimizer (Optimizer): The optimizer to be updated.
138 """
139 # TODO: implement this method with plugin
140 optimizer.backward(loss)
141
142 def execute_pipeline(self,
143 data_iter: Iterator,
144 model: nn.Module,
145 criterion: Callable[[torch.Tensor], torch.Tensor],
146 optimizer: Optimizer,
147 return_loss: bool = True,
148 return_outputs: bool = False) -> Tuple[Optional[torch.Tensor], ...]:
149 # TODO: implement this method
150 # run pipeline forward backward pass
151 # return loss or outputs if needed
152 pass
153
154 def no_sync(self, model: nn.Module) -> contextmanager:
155 """Context manager to disable gradient synchronization across DP process groups.
156
157 Args:
158 model (nn.Module): The model to be disabled gradient synchronization.
159
160 Returns:
161 contextmanager: Context to disable gradient synchronization.
162 """
163 assert self.plugin is not None, f'no_sync is only enabled when a plugin is provided and the plugin supports no_sync.'
164 assert self.plugin.support_no_sync, f'The plugin {self.plugin.__class__.__name__} does not support no_sync.'
165 return self.plugin.no_sync(model)
166
167 def load_model(self, model: nn.Module, checkpoint: str, strict: bool = True):
168 """Load model from checkpoint.
169
170 Args:
171 model (nn.Module): A model boosted by Booster.
172 checkpoint (str): Path to the checkpoint. It must be a local path.
173 It should be a directory path if the checkpoint is sharded. Otherwise, it should be a file path.
174 strict (bool, optional): whether to strictly enforce that the keys
175 in :attr:`state_dict` match the keys returned by this module's
176 :meth:`~torch.nn.Module.state_dict` function. Defaults to True.
177 """
178 self.checkpoint_io.load_model(model, checkpoint, strict)
179
180 def save_model(self,
181 model: nn.Module,
182 checkpoint: str,
183 prefix: str = None,
184 shard: bool = False,
185 size_per_shard: int = 1024):
186 """Save model to checkpoint.
187
188 Args:
189 model (nn.Module): A model boosted by Booster.
190 checkpoint (str): Path to the checkpoint. It must be a local path.
191 It is a file path if ``shard=False``. Otherwise, it is a directory path.
192 prefix (str, optional): A prefix added to parameter and buffer
193 names to compose the keys in state_dict. Defaults to None.
194 shard (bool, optional): Whether to save checkpoint a sharded way.
195 If true, the checkpoint will be a folder. Otherwise, it will be a single file. Defaults to False.
196 size_per_shard (int, optional): Maximum size of checkpoint shard file in MB. This is useful only when ``shard=True``. Defaults to 1024.
197 """
198 self.checkpoint_io.save_model(model, checkpoint, prefix, shard, size_per_shard)
199
200 def load_optimizer(self, optimizer: Optimizer, checkpoint: str):
201 """Load optimizer from checkpoint.
202
203 Args:
204 optimizer (Optimizer): An optimizer boosted by Booster.
205 checkpoint (str): Path to the checkpoint. It must be a local path.
206 It should be a directory path if the checkpoint is sharded. Otherwise, it should be a file path.
207 """
208 self.checkpoint_io.load_optimizer(optimizer, checkpoint)
209
210 def save_optimizer(self, optimizer: Optimizer, checkpoint: str, shard: bool = False, size_per_shard: int = 1024):
211 """Save optimizer to checkpoint.
212 Warning: Saving sharded optimizer checkpoint is not supported yet.
213
214 Args:
215 optimizer (Optimizer): An optimizer boosted by Booster.
216 checkpoint (str): Path to the checkpoint. It must be a local path.
217 It is a file path if ``shard=False``. Otherwise, it is a directory path.
218 shard (bool, optional): Whether to save checkpoint a sharded way.
219 If true, the checkpoint will be a folder. Otherwise, it will be a single file. Defaults to False.
220 size_per_shard (int, optional): Maximum size of checkpoint shard file in MB. This is useful only when ``shard=True``. Defaults to 1024.
221 """
222 self.checkpoint_io.save_optimizer(optimizer, checkpoint, shard, size_per_shard)
223
224 def save_lr_scheduler(self, lr_scheduler: LRScheduler, checkpoint: str):
225 """Save lr scheduler to checkpoint.
226
227 Args:
228 lr_scheduler (LRScheduler): A lr scheduler boosted by Booster.
229 checkpoint (str): Path to the checkpoint. It must be a local file path.
230 """
231 self.checkpoint_io.save_lr_scheduler(lr_scheduler, checkpoint)
232
233 def load_lr_scheduler(self, lr_scheduler: LRScheduler, checkpoint: str):
234 """Load lr scheduler from checkpoint.
235
236 Args:
237 lr_scheduler (LRScheduler): A lr scheduler boosted by Booster.
238 checkpoint (str): Path to the checkpoint. It must be a local file path.
239 """
240 self.checkpoint_io.load_lr_scheduler(lr_scheduler, checkpoint)
241
[end of colossalai/booster/booster.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/colossalai/booster/booster.py b/colossalai/booster/booster.py
--- a/colossalai/booster/booster.py
+++ b/colossalai/booster/booster.py
@@ -23,27 +23,28 @@
training with different precision, accelerator, and plugin.
Examples:
- >>> colossalai.launch(...)
- >>> plugin = GeminiPlugin(stage=3, ...)
- >>> booster = Booster(precision='fp16', plugin=plugin)
- >>>
- >>> model = GPT2()
- >>> optimizer = Adam(model.parameters())
- >>> dataloader = Dataloader(Dataset)
- >>> lr_scheduler = LinearWarmupScheduler()
- >>> criterion = GPTLMLoss()
- >>>
- >>> model, optimizer, lr_scheduler, dataloader = booster.boost(model, optimizer, lr_scheduler, dataloader)
- >>>
- >>> for epoch in range(max_epochs):
- >>> for input_ids, attention_mask in dataloader:
- >>> outputs = model(input_ids, attention_mask)
- >>> loss = criterion(outputs.logits, input_ids)
- >>> booster.backward(loss, optimizer)
- >>> optimizer.step()
- >>> lr_scheduler.step()
- >>> optimizer.zero_grad()
-
+ ```python
+ colossalai.launch(...)
+ plugin = GeminiPlugin(stage=3, ...)
+ booster = Booster(precision='fp16', plugin=plugin)
+
+ model = GPT2()
+ optimizer = Adam(model.parameters())
+ dataloader = Dataloader(Dataset)
+ lr_scheduler = LinearWarmupScheduler()
+ criterion = GPTLMLoss()
+
+ model, optimizer, lr_scheduler, dataloader = booster.boost(model, optimizer, lr_scheduler, dataloader)
+
+ for epoch in range(max_epochs):
+ for input_ids, attention_mask in dataloader:
+ outputs = model(input_ids, attention_mask)
+ loss = criterion(outputs.logits, input_ids)
+ booster.backward(loss, optimizer)
+ optimizer.step()
+ lr_scheduler.step()
+ optimizer.zero_grad()
+ ```
Args:
device (str or torch.device): The device to run the training. Default: 'cuda'.
| {"golden_diff": "diff --git a/colossalai/booster/booster.py b/colossalai/booster/booster.py\n--- a/colossalai/booster/booster.py\n+++ b/colossalai/booster/booster.py\n@@ -23,27 +23,28 @@\n training with different precision, accelerator, and plugin.\n \n Examples:\n- >>> colossalai.launch(...)\n- >>> plugin = GeminiPlugin(stage=3, ...)\n- >>> booster = Booster(precision='fp16', plugin=plugin)\n- >>>\n- >>> model = GPT2()\n- >>> optimizer = Adam(model.parameters())\n- >>> dataloader = Dataloader(Dataset)\n- >>> lr_scheduler = LinearWarmupScheduler()\n- >>> criterion = GPTLMLoss()\n- >>>\n- >>> model, optimizer, lr_scheduler, dataloader = booster.boost(model, optimizer, lr_scheduler, dataloader)\n- >>>\n- >>> for epoch in range(max_epochs):\n- >>> for input_ids, attention_mask in dataloader:\n- >>> outputs = model(input_ids, attention_mask)\n- >>> loss = criterion(outputs.logits, input_ids)\n- >>> booster.backward(loss, optimizer)\n- >>> optimizer.step()\n- >>> lr_scheduler.step()\n- >>> optimizer.zero_grad()\n-\n+ ```python\n+ colossalai.launch(...)\n+ plugin = GeminiPlugin(stage=3, ...)\n+ booster = Booster(precision='fp16', plugin=plugin)\n+\n+ model = GPT2()\n+ optimizer = Adam(model.parameters())\n+ dataloader = Dataloader(Dataset)\n+ lr_scheduler = LinearWarmupScheduler()\n+ criterion = GPTLMLoss()\n+\n+ model, optimizer, lr_scheduler, dataloader = booster.boost(model, optimizer, lr_scheduler, dataloader)\n+\n+ for epoch in range(max_epochs):\n+ for input_ids, attention_mask in dataloader:\n+ outputs = model(input_ids, attention_mask)\n+ loss = criterion(outputs.logits, input_ids)\n+ booster.backward(loss, optimizer)\n+ optimizer.step()\n+ lr_scheduler.step()\n+ optimizer.zero_grad()\n+ ```\n \n Args:\n device (str or torch.device): The device to run the training. Default: 'cuda'.\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[BUG]: booster API docstring is misaligned\n### \ud83d\udc1b Describe the bug\n\nThe example should follow markdown format to be correctly rendered on the documentation website.\n\n### Environment\n\n_No response_\n", "before_files": [{"content": "import warnings\nfrom contextlib import contextmanager\nfrom typing import Callable, Iterator, List, Optional, Tuple, Union\n\nimport torch\nimport torch.nn as nn\nfrom torch.optim import Optimizer\nfrom torch.optim.lr_scheduler import _LRScheduler as LRScheduler\nfrom torch.utils.data import DataLoader\n\nfrom colossalai.checkpoint_io import GeneralCheckpointIO\n\nfrom .accelerator import Accelerator\nfrom .mixed_precision import MixedPrecision, mixed_precision_factory\nfrom .plugin import Plugin\n\n__all__ = ['Booster']\n\n\nclass Booster:\n \"\"\"\n Booster is a high-level API for training neural networks. 
It provides a unified interface for\n training with different precision, accelerator, and plugin.\n\n Examples:\n >>> colossalai.launch(...)\n >>> plugin = GeminiPlugin(stage=3, ...)\n >>> booster = Booster(precision='fp16', plugin=plugin)\n >>>\n >>> model = GPT2()\n >>> optimizer = Adam(model.parameters())\n >>> dataloader = Dataloader(Dataset)\n >>> lr_scheduler = LinearWarmupScheduler()\n >>> criterion = GPTLMLoss()\n >>>\n >>> model, optimizer, lr_scheduler, dataloader = booster.boost(model, optimizer, lr_scheduler, dataloader)\n >>>\n >>> for epoch in range(max_epochs):\n >>> for input_ids, attention_mask in dataloader:\n >>> outputs = model(input_ids, attention_mask)\n >>> loss = criterion(outputs.logits, input_ids)\n >>> booster.backward(loss, optimizer)\n >>> optimizer.step()\n >>> lr_scheduler.step()\n >>> optimizer.zero_grad()\n\n\n Args:\n device (str or torch.device): The device to run the training. Default: 'cuda'.\n mixed_precision (str or MixedPrecision): The mixed precision to run the training. Default: None.\n If the argument is a string, it can be 'fp16', 'fp16_apex', 'bf16', or 'fp8'.\n 'fp16' would use PyTorch AMP while `fp16_apex` would use Nvidia Apex.\n plugin (Plugin): The plugin to run the training. Default: None.\n \"\"\"\n\n def __init__(self,\n device: str = 'cuda',\n mixed_precision: Union[MixedPrecision, str] = None,\n plugin: Optional[Plugin] = None) -> None:\n if plugin is not None:\n assert isinstance(\n plugin, Plugin), f'Expected the argument plugin to be an instance of Plugin, but got {type(plugin)}.'\n self.plugin = plugin\n\n # set accelerator\n if self.plugin and self.plugin.control_device():\n self.accelerator = None\n warnings.warn('The plugin will control the accelerator, so the device argument will be ignored.')\n else:\n self.accelerator = Accelerator(device)\n\n # set precision\n if self.plugin and self.plugin.control_precision():\n warnings.warn('The plugin will control the precision, so the mixed_precision argument will be ignored.')\n self.mixed_precision = None\n elif mixed_precision is None:\n self.mixed_precision = None\n else:\n # validate and set precision\n if isinstance(mixed_precision, str):\n # the user will take the default arguments for amp training\n self.mixed_precision = mixed_precision_factory(mixed_precision)\n elif isinstance(mixed_precision, MixedPrecision):\n # the user can customize the arguments by passing the precision object\n self.mixed_precision = mixed_precision\n else:\n raise ValueError(\n f'Expected the argument mixed_precision to be a string or an instance of Precision, but got {type(mixed_precision)}.'\n )\n\n if self.plugin is not None and self.plugin.control_checkpoint_io():\n self.checkpoint_io = self.plugin.get_checkpoint_io()\n else:\n self.checkpoint_io = GeneralCheckpointIO()\n\n def boost(\n self,\n model: nn.Module,\n optimizer: Optimizer,\n criterion: Callable = None,\n dataloader: DataLoader = None,\n lr_scheduler: LRScheduler = None,\n ) -> List[Union[nn.Module, Optimizer, LRScheduler, DataLoader]]:\n \"\"\"\n Boost the model, optimizer, criterion, lr_scheduler, and dataloader.\n\n Args:\n model (nn.Module): The model to be boosted.\n optimizer (Optimizer): The optimizer to be boosted.\n criterion (Callable): The criterion to be boosted.\n dataloader (DataLoader): The dataloader to be boosted.\n lr_scheduler (LRScheduler): The lr_scheduler to be boosted.\n \"\"\"\n # TODO(FrankLeeeee): consider multi-model and multi-optimizer case\n # TODO(FrankLeeeee): consider multi-dataloader case\n # transform 
model for mixed precision\n if self.plugin:\n model, optimizer, criterion, dataloader, lr_scheduler = self.plugin.configure(\n model, optimizer, criterion, dataloader, lr_scheduler)\n\n if self.plugin and not self.plugin.control_device():\n # transform model for accelerator\n model = self.accelerator.configure(model)\n\n if self.mixed_precision and (self.plugin is None or self.plugin and not self.plugin.control_precision()):\n # transform model for mixed precision\n # when mixed_precision is specified and the plugin is not given or does not control the precision\n model, optimizer, criterion = self.mixed_precision.configure(model, optimizer, criterion)\n\n return model, optimizer, criterion, dataloader, lr_scheduler\n\n def backward(self, loss: torch.Tensor, optimizer: Optimizer) -> None:\n \"\"\"Backward pass.\n\n Args:\n loss (torch.Tensor): The loss to be backpropagated.\n optimizer (Optimizer): The optimizer to be updated.\n \"\"\"\n # TODO: implement this method with plugin\n optimizer.backward(loss)\n\n def execute_pipeline(self,\n data_iter: Iterator,\n model: nn.Module,\n criterion: Callable[[torch.Tensor], torch.Tensor],\n optimizer: Optimizer,\n return_loss: bool = True,\n return_outputs: bool = False) -> Tuple[Optional[torch.Tensor], ...]:\n # TODO: implement this method\n # run pipeline forward backward pass\n # return loss or outputs if needed\n pass\n\n def no_sync(self, model: nn.Module) -> contextmanager:\n \"\"\"Context manager to disable gradient synchronization across DP process groups.\n\n Args:\n model (nn.Module): The model to be disabled gradient synchronization.\n\n Returns:\n contextmanager: Context to disable gradient synchronization.\n \"\"\"\n assert self.plugin is not None, f'no_sync is only enabled when a plugin is provided and the plugin supports no_sync.'\n assert self.plugin.support_no_sync, f'The plugin {self.plugin.__class__.__name__} does not support no_sync.'\n return self.plugin.no_sync(model)\n\n def load_model(self, model: nn.Module, checkpoint: str, strict: bool = True):\n \"\"\"Load model from checkpoint.\n\n Args:\n model (nn.Module): A model boosted by Booster.\n checkpoint (str): Path to the checkpoint. It must be a local path.\n It should be a directory path if the checkpoint is sharded. Otherwise, it should be a file path.\n strict (bool, optional): whether to strictly enforce that the keys\n in :attr:`state_dict` match the keys returned by this module's\n :meth:`~torch.nn.Module.state_dict` function. Defaults to True.\n \"\"\"\n self.checkpoint_io.load_model(model, checkpoint, strict)\n\n def save_model(self,\n model: nn.Module,\n checkpoint: str,\n prefix: str = None,\n shard: bool = False,\n size_per_shard: int = 1024):\n \"\"\"Save model to checkpoint.\n\n Args:\n model (nn.Module): A model boosted by Booster.\n checkpoint (str): Path to the checkpoint. It must be a local path.\n It is a file path if ``shard=False``. Otherwise, it is a directory path.\n prefix (str, optional): A prefix added to parameter and buffer\n names to compose the keys in state_dict. Defaults to None.\n shard (bool, optional): Whether to save checkpoint a sharded way.\n If true, the checkpoint will be a folder. Otherwise, it will be a single file. Defaults to False.\n size_per_shard (int, optional): Maximum size of checkpoint shard file in MB. This is useful only when ``shard=True``. 
Defaults to 1024.\n \"\"\"\n self.checkpoint_io.save_model(model, checkpoint, prefix, shard, size_per_shard)\n\n def load_optimizer(self, optimizer: Optimizer, checkpoint: str):\n \"\"\"Load optimizer from checkpoint.\n\n Args:\n optimizer (Optimizer): An optimizer boosted by Booster.\n checkpoint (str): Path to the checkpoint. It must be a local path.\n It should be a directory path if the checkpoint is sharded. Otherwise, it should be a file path.\n \"\"\"\n self.checkpoint_io.load_optimizer(optimizer, checkpoint)\n\n def save_optimizer(self, optimizer: Optimizer, checkpoint: str, shard: bool = False, size_per_shard: int = 1024):\n \"\"\"Save optimizer to checkpoint.\n Warning: Saving sharded optimizer checkpoint is not supported yet.\n\n Args:\n optimizer (Optimizer): An optimizer boosted by Booster.\n checkpoint (str): Path to the checkpoint. It must be a local path.\n It is a file path if ``shard=False``. Otherwise, it is a directory path.\n shard (bool, optional): Whether to save checkpoint a sharded way.\n If true, the checkpoint will be a folder. Otherwise, it will be a single file. Defaults to False.\n size_per_shard (int, optional): Maximum size of checkpoint shard file in MB. This is useful only when ``shard=True``. Defaults to 1024.\n \"\"\"\n self.checkpoint_io.save_optimizer(optimizer, checkpoint, shard, size_per_shard)\n\n def save_lr_scheduler(self, lr_scheduler: LRScheduler, checkpoint: str):\n \"\"\"Save lr scheduler to checkpoint.\n\n Args:\n lr_scheduler (LRScheduler): A lr scheduler boosted by Booster.\n checkpoint (str): Path to the checkpoint. It must be a local file path.\n \"\"\"\n self.checkpoint_io.save_lr_scheduler(lr_scheduler, checkpoint)\n\n def load_lr_scheduler(self, lr_scheduler: LRScheduler, checkpoint: str):\n \"\"\"Load lr scheduler from checkpoint.\n\n Args:\n lr_scheduler (LRScheduler): A lr scheduler boosted by Booster.\n checkpoint (str): Path to the checkpoint. It must be a local file path.\n \"\"\"\n self.checkpoint_io.load_lr_scheduler(lr_scheduler, checkpoint)\n", "path": "colossalai/booster/booster.py"}]} | 3,511 | 509 |
gh_patches_debug_35712 | rasdani/github-patches | git_diff | googleapis__google-auth-library-python-655 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add support for Python 3.9
</issue>
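For context, a hypothetical sketch (not the issue text and not the actual patch) of what adding a new interpreter usually looks like in a nox-based setup such as the one below — extend the version matrix of the test session and, separately, advertise support in the package classifiers:

```python
# Hypothetical sketch only: widen the nox test matrix to include 3.9.
import nox


@nox.session(python=["3.6", "3.7", "3.8", "3.9"])
def unit(session):
    session.install(".")
    session.run("pytest", "tests")
```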
<code>
[start of noxfile.py]
1 # Copyright 2019 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import nox
16
17 TEST_DEPENDENCIES = [
18 "flask",
19 "freezegun",
20 "mock",
21 "oauth2client",
22 "pyopenssl",
23 "pytest",
24 "pytest-cov",
25 "pytest-localserver",
26 "requests",
27 "urllib3",
28 "cryptography",
29 "responses",
30 "grpcio",
31 ]
32
33 ASYNC_DEPENDENCIES = ["pytest-asyncio", "aioresponses", "asynctest"]
34
35 BLACK_VERSION = "black==19.3b0"
36 BLACK_PATHS = [
37 "google",
38 "tests",
39 "tests_async",
40 "noxfile.py",
41 "setup.py",
42 "docs/conf.py",
43 ]
44
45
46 @nox.session(python="3.7")
47 def lint(session):
48 session.install("flake8", "flake8-import-order", "docutils", BLACK_VERSION)
49 session.install(".")
50 session.run("black", "--check", *BLACK_PATHS)
51 session.run(
52 "flake8",
53 "--import-order-style=google",
54 "--application-import-names=google,tests,system_tests",
55 "google",
56 "tests",
57 "tests_async",
58 )
59 session.run(
60 "python", "setup.py", "check", "--metadata", "--restructuredtext", "--strict"
61 )
62
63
64 @nox.session(python="3.6")
65 def blacken(session):
66 """Run black.
67
68 Format code to uniform standard.
69
70 This currently uses Python 3.6 due to the automated Kokoro run of synthtool.
71 That run uses an image that doesn't have 3.6 installed. Before updating this
72 check the state of the `gcp_ubuntu_config` we use for that Kokoro run.
73 """
74 session.install(BLACK_VERSION)
75 session.run("black", *BLACK_PATHS)
76
77
78 @nox.session(python=["3.6", "3.7", "3.8"])
79 def unit(session):
80 session.install(*TEST_DEPENDENCIES)
81 session.install(*(ASYNC_DEPENDENCIES))
82 session.install(".")
83 session.run(
84 "pytest",
85 "--cov=google.auth",
86 "--cov=google.oauth2",
87 "--cov=tests",
88 "tests",
89 "tests_async",
90 )
91
92
93 @nox.session(python=["2.7", "3.5"])
94 def unit_prev_versions(session):
95 session.install(*TEST_DEPENDENCIES)
96 session.install(".")
97 session.run(
98 "pytest", "--cov=google.auth", "--cov=google.oauth2", "--cov=tests", "tests"
99 )
100
101
102 @nox.session(python="3.7")
103 def cover(session):
104 session.install(*TEST_DEPENDENCIES)
105 session.install(*(ASYNC_DEPENDENCIES))
106 session.install(".")
107 session.run(
108 "pytest",
109 "--cov=google.auth",
110 "--cov=google.oauth2",
111 "--cov=tests",
112 "--cov=tests_async",
113 "--cov-report=",
114 "tests",
115 "tests_async",
116 )
117 session.run("coverage", "report", "--show-missing", "--fail-under=100")
118
119
120 @nox.session(python="3.7")
121 def docgen(session):
122 session.env["SPHINX_APIDOC_OPTIONS"] = "members,inherited-members,show-inheritance"
123 session.install(*TEST_DEPENDENCIES)
124 session.install("sphinx")
125 session.install(".")
126 session.run("rm", "-r", "docs/reference")
127 session.run(
128 "sphinx-apidoc",
129 "--output-dir",
130 "docs/reference",
131 "--separate",
132 "--module-first",
133 "google",
134 )
135
136
137 @nox.session(python="3.7")
138 def docs(session):
139 session.install("sphinx", "-r", "docs/requirements-docs.txt")
140 session.install(".")
141 session.run("make", "-C", "docs", "html")
142
143
144 @nox.session(python="pypy")
145 def pypy(session):
146 session.install(*TEST_DEPENDENCIES)
147 session.install(*ASYNC_DEPENDENCIES)
148 session.install(".")
149 session.run(
150 "pytest",
151 "--cov=google.auth",
152 "--cov=google.oauth2",
153 "--cov=tests",
154 "tests",
155 "tests_async",
156 )
157
[end of noxfile.py]
[start of setup.py]
1 # Copyright 2014 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import io
16
17 from setuptools import find_packages
18 from setuptools import setup
19
20
21 DEPENDENCIES = (
22 "cachetools>=2.0.0,<5.0",
23 "pyasn1-modules>=0.2.1",
24 # rsa==4.5 is the last version to support 2.7
25 # https://github.com/sybrenstuvel/python-rsa/issues/152#issuecomment-643470233
26 'rsa<4.6; python_version < "3.5"',
27 'rsa>=3.1.4,<5; python_version >= "3.5"',
28 "setuptools>=40.3.0",
29 "six>=1.9.0",
30 )
31
32 extras = {"aiohttp": "aiohttp >= 3.6.2, < 4.0.0dev; python_version>='3.6'"}
33
34 with io.open("README.rst", "r") as fh:
35 long_description = fh.read()
36
37 version = "1.23.0"
38
39 setup(
40 name="google-auth",
41 version=version,
42 author="Google Cloud Platform",
43 author_email="[email protected]",
44 description="Google Authentication Library",
45 long_description=long_description,
46 url="https://github.com/googleapis/google-auth-library-python",
47 packages=find_packages(exclude=("tests*", "system_tests*")),
48 namespace_packages=("google",),
49 install_requires=DEPENDENCIES,
50 extras_require=extras,
51 python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*",
52 license="Apache 2.0",
53 keywords="google auth oauth client",
54 classifiers=[
55 "Programming Language :: Python :: 2",
56 "Programming Language :: Python :: 2.7",
57 "Programming Language :: Python :: 3",
58 "Programming Language :: Python :: 3.5",
59 "Programming Language :: Python :: 3.6",
60 "Programming Language :: Python :: 3.7",
61 "Programming Language :: Python :: 3.8",
62 "Development Status :: 5 - Production/Stable",
63 "Intended Audience :: Developers",
64 "License :: OSI Approved :: Apache Software License",
65 "Operating System :: POSIX",
66 "Operating System :: Microsoft :: Windows",
67 "Operating System :: MacOS :: MacOS X",
68 "Operating System :: OS Independent",
69 "Topic :: Internet :: WWW/HTTP",
70 ],
71 )
72
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/noxfile.py b/noxfile.py
--- a/noxfile.py
+++ b/noxfile.py
@@ -75,7 +75,7 @@
session.run("black", *BLACK_PATHS)
[email protected](python=["3.6", "3.7", "3.8"])
[email protected](python=["3.6", "3.7", "3.8", "3.9"])
def unit(session):
session.install(*TEST_DEPENDENCIES)
session.install(*(ASYNC_DEPENDENCIES))
@@ -90,7 +90,7 @@
)
[email protected](python=["2.7", "3.5"])
[email protected](python=["2.7"])
def unit_prev_versions(session):
session.install(*TEST_DEPENDENCIES)
session.install(".")
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -23,8 +23,8 @@
"pyasn1-modules>=0.2.1",
# rsa==4.5 is the last version to support 2.7
# https://github.com/sybrenstuvel/python-rsa/issues/152#issuecomment-643470233
- 'rsa<4.6; python_version < "3.5"',
- 'rsa>=3.1.4,<5; python_version >= "3.5"',
+ 'rsa<4.6; python_version < "3.6"',
+ 'rsa>=3.1.4,<5; python_version >= "3.6"',
"setuptools>=40.3.0",
"six>=1.9.0",
)
@@ -48,17 +48,17 @@
namespace_packages=("google",),
install_requires=DEPENDENCIES,
extras_require=extras,
- python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*",
+ python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*",
license="Apache 2.0",
keywords="google auth oauth client",
classifiers=[
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
- "Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
| {"golden_diff": "diff --git a/noxfile.py b/noxfile.py\n--- a/noxfile.py\n+++ b/noxfile.py\n@@ -75,7 +75,7 @@\n session.run(\"black\", *BLACK_PATHS)\n \n \[email protected](python=[\"3.6\", \"3.7\", \"3.8\"])\[email protected](python=[\"3.6\", \"3.7\", \"3.8\", \"3.9\"])\n def unit(session):\n session.install(*TEST_DEPENDENCIES)\n session.install(*(ASYNC_DEPENDENCIES))\n@@ -90,7 +90,7 @@\n )\n \n \[email protected](python=[\"2.7\", \"3.5\"])\[email protected](python=[\"2.7\"])\n def unit_prev_versions(session):\n session.install(*TEST_DEPENDENCIES)\n session.install(\".\")\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -23,8 +23,8 @@\n \"pyasn1-modules>=0.2.1\",\n # rsa==4.5 is the last version to support 2.7\n # https://github.com/sybrenstuvel/python-rsa/issues/152#issuecomment-643470233\n- 'rsa<4.6; python_version < \"3.5\"',\n- 'rsa>=3.1.4,<5; python_version >= \"3.5\"',\n+ 'rsa<4.6; python_version < \"3.6\"',\n+ 'rsa>=3.1.4,<5; python_version >= \"3.6\"',\n \"setuptools>=40.3.0\",\n \"six>=1.9.0\",\n )\n@@ -48,17 +48,17 @@\n namespace_packages=(\"google\",),\n install_requires=DEPENDENCIES,\n extras_require=extras,\n- python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*\",\n+ python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*\",\n license=\"Apache 2.0\",\n keywords=\"google auth oauth client\",\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n- \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n+ \"Programming Language :: Python :: 3.9\",\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n", "issue": "Add support for Python 3.9\n\n", "before_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport nox\n\nTEST_DEPENDENCIES = [\n \"flask\",\n \"freezegun\",\n \"mock\",\n \"oauth2client\",\n \"pyopenssl\",\n \"pytest\",\n \"pytest-cov\",\n \"pytest-localserver\",\n \"requests\",\n \"urllib3\",\n \"cryptography\",\n \"responses\",\n \"grpcio\",\n]\n\nASYNC_DEPENDENCIES = [\"pytest-asyncio\", \"aioresponses\", \"asynctest\"]\n\nBLACK_VERSION = \"black==19.3b0\"\nBLACK_PATHS = [\n \"google\",\n \"tests\",\n \"tests_async\",\n \"noxfile.py\",\n \"setup.py\",\n \"docs/conf.py\",\n]\n\n\[email protected](python=\"3.7\")\ndef lint(session):\n session.install(\"flake8\", \"flake8-import-order\", \"docutils\", BLACK_VERSION)\n session.install(\".\")\n session.run(\"black\", \"--check\", *BLACK_PATHS)\n session.run(\n \"flake8\",\n \"--import-order-style=google\",\n \"--application-import-names=google,tests,system_tests\",\n \"google\",\n \"tests\",\n \"tests_async\",\n )\n session.run(\n \"python\", \"setup.py\", \"check\", \"--metadata\", \"--restructuredtext\", \"--strict\"\n 
)\n\n\[email protected](python=\"3.6\")\ndef blacken(session):\n \"\"\"Run black.\n\n Format code to uniform standard.\n\n This currently uses Python 3.6 due to the automated Kokoro run of synthtool.\n That run uses an image that doesn't have 3.6 installed. Before updating this\n check the state of the `gcp_ubuntu_config` we use for that Kokoro run.\n \"\"\"\n session.install(BLACK_VERSION)\n session.run(\"black\", *BLACK_PATHS)\n\n\[email protected](python=[\"3.6\", \"3.7\", \"3.8\"])\ndef unit(session):\n session.install(*TEST_DEPENDENCIES)\n session.install(*(ASYNC_DEPENDENCIES))\n session.install(\".\")\n session.run(\n \"pytest\",\n \"--cov=google.auth\",\n \"--cov=google.oauth2\",\n \"--cov=tests\",\n \"tests\",\n \"tests_async\",\n )\n\n\[email protected](python=[\"2.7\", \"3.5\"])\ndef unit_prev_versions(session):\n session.install(*TEST_DEPENDENCIES)\n session.install(\".\")\n session.run(\n \"pytest\", \"--cov=google.auth\", \"--cov=google.oauth2\", \"--cov=tests\", \"tests\"\n )\n\n\[email protected](python=\"3.7\")\ndef cover(session):\n session.install(*TEST_DEPENDENCIES)\n session.install(*(ASYNC_DEPENDENCIES))\n session.install(\".\")\n session.run(\n \"pytest\",\n \"--cov=google.auth\",\n \"--cov=google.oauth2\",\n \"--cov=tests\",\n \"--cov=tests_async\",\n \"--cov-report=\",\n \"tests\",\n \"tests_async\",\n )\n session.run(\"coverage\", \"report\", \"--show-missing\", \"--fail-under=100\")\n\n\[email protected](python=\"3.7\")\ndef docgen(session):\n session.env[\"SPHINX_APIDOC_OPTIONS\"] = \"members,inherited-members,show-inheritance\"\n session.install(*TEST_DEPENDENCIES)\n session.install(\"sphinx\")\n session.install(\".\")\n session.run(\"rm\", \"-r\", \"docs/reference\")\n session.run(\n \"sphinx-apidoc\",\n \"--output-dir\",\n \"docs/reference\",\n \"--separate\",\n \"--module-first\",\n \"google\",\n )\n\n\[email protected](python=\"3.7\")\ndef docs(session):\n session.install(\"sphinx\", \"-r\", \"docs/requirements-docs.txt\")\n session.install(\".\")\n session.run(\"make\", \"-C\", \"docs\", \"html\")\n\n\[email protected](python=\"pypy\")\ndef pypy(session):\n session.install(*TEST_DEPENDENCIES)\n session.install(*ASYNC_DEPENDENCIES)\n session.install(\".\")\n session.run(\n \"pytest\",\n \"--cov=google.auth\",\n \"--cov=google.oauth2\",\n \"--cov=tests\",\n \"tests\",\n \"tests_async\",\n )\n", "path": "noxfile.py"}, {"content": "# Copyright 2014 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\nDEPENDENCIES = (\n \"cachetools>=2.0.0,<5.0\",\n \"pyasn1-modules>=0.2.1\",\n # rsa==4.5 is the last version to support 2.7\n # https://github.com/sybrenstuvel/python-rsa/issues/152#issuecomment-643470233\n 'rsa<4.6; python_version < \"3.5\"',\n 'rsa>=3.1.4,<5; python_version >= \"3.5\"',\n \"setuptools>=40.3.0\",\n \"six>=1.9.0\",\n)\n\nextras = {\"aiohttp\": \"aiohttp >= 3.6.2, < 4.0.0dev; python_version>='3.6'\"}\n\nwith io.open(\"README.rst\", \"r\") as fh:\n 
long_description = fh.read()\n\nversion = \"1.23.0\"\n\nsetup(\n name=\"google-auth\",\n version=version,\n author=\"Google Cloud Platform\",\n author_email=\"[email protected]\",\n description=\"Google Authentication Library\",\n long_description=long_description,\n url=\"https://github.com/googleapis/google-auth-library-python\",\n packages=find_packages(exclude=(\"tests*\", \"system_tests*\")),\n namespace_packages=(\"google\",),\n install_requires=DEPENDENCIES,\n extras_require=extras,\n python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*\",\n license=\"Apache 2.0\",\n keywords=\"google auth oauth client\",\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n ],\n)\n", "path": "setup.py"}]} | 2,751 | 627 |
gh_patches_debug_10654 | rasdani/github-patches | git_diff | quantumlib__Cirq-3404 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Read the docs has goofy copyright
**Description of the issue**
Copyright shows up on cirq tutorials on readthedocs.
**How to reproduce the issue**
Go to https://cirq.readthedocs.io/en/latest/docs/gates.html or other pages. The top part says "Copyright 2020 The Cirq Developers" with an Apache license notice.
**Cirq version**
0.9.1. It probably broke when we adjusted all the copyright notices.
</issue>
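The affected pages are notebooks rendered through the Sphinx configuration below, and `rtd_docs/conf.py` already rewrites notebook source in a `source_read` hook. One way to hide the banner, sketched here with illustrative regexes rather than a definitive patch, is to have that hook delete the copyright heading and the `#@title` license code cell from the raw notebook JSON before Sphinx renders it:

```python
import re


def source_read(app, docname, source):
    # Delete the "##### Copyright 20xx The Cirq Developers" markdown heading.
    source[0] = re.sub(r'"##### (Copyright 20\d\d The Cirq Developers)"', r'""',
                       source[0])
    # Drop the whole "#@title ... License" code cell from the notebook JSON;
    # re.S lets ".*?" span the escaped newlines inside the JSON string.
    source[0] = re.sub(
        r'(\{\s*?"cell_type": "code".*?"#@title.*License.".*?\},)',
        r'',
        source[0],
        flags=re.S)
```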
<code>
[start of rtd_docs/conf.py]
1 # -*- coding: utf-8 -*-
2 # coverage: ignore
3
4 # The content for all documentation lives in ../docs. That folder is
5 # following the structure for the Google Quantum site configured for the
6 # internal CMS, devsite. The readthedocs layer is a secondary, which generates
7 # the content using sphinx to readthedocs.io until we go live with the devsite.
8 #
9 # This is the configuration file for the Sphinx documentation builder.
10 # See http://www.sphinx-doc.org/en/master/config for help
11
12 # -- Path setup --------------------------------------------------------------
13
14 # If extensions (or modules to document with autodoc) are in another directory,
15 # add these directories to sys.path here. If the directory is relative to the
16 # documentation root, use os.path.abspath to make it absolute, like shown here.
17 #
18 import inspect
19 import re
20 from typing import List, Any
21
22 import os
23 import sys
24 import shutil
25
26 import pypandoc
27
28 cirq_root_path = os.path.dirname(os.path.dirname(__file__))
29 sys.path.insert(0, cirq_root_path)
30 from cirq import _doc
31
32
33 def setup(app):
34 # just in case it exists (locally) remove the copied docs folder
35 shutil.rmtree("./docs", ignore_errors=True)
36 # copy recursively the actual content from the devsite folder
37 # to rtd_docs/docs
38 shutil.copytree(src="../docs", dst="./docs")
39 app.add_config_value('pandoc_use_parser', 'markdown', True)
40 app.connect('autodoc-process-docstring', autodoc_process)
41 app.connect('autodoc-skip-member', autodoc_skip_member)
42 app.connect('source-read', source_read)
43
44
45 def convert_markdown_mathjax_for_rst(lines: List[str]) -> List[str]:
46 if all('$$' not in line for line in lines):
47 return lines
48
49 data = '\n'.join(lines)
50 sections = data.split('$$')
51 if len(sections) % 2 != 1:
52 raise ValueError('Mismatched number of "$$" latex tokens.')
53
54 result = []
55 for i, s in enumerate(sections):
56 if i % 2:
57 # Avoid getting split across divs.
58 s = ' '.join(s.split('\n'))
59 # Avoid intermediate layers turning our newlines into slashes.
60 s = s.replace('\\\\', r'\newline')
61 # Turn latex like "|x\rangle" into "|x \rangle".
62 # The extra space seems to be necessary to survive a later pass.
63 s = re.sub(r'([a-zA-Z0-9])\\', r'\1 \\', s)
64 # Keep the $$ so MathJax can find it.
65 result.append('$${}$$'.format(s))
66 else:
67 # Work around bad table detection in pandoc by concatenating
68 # lines from the same paragraph.
69 s = '\n\n'.join(e.replace('\n', ' ') for e in s.split('\n\n'))
70
71 # Convert markdown to rst.
72 out = pypandoc.convert(s, to='rst', format='markdown_github')
73
74 # Not sure why pandoc is escaping these...
75 out = out.replace(r'\|', '|')
76
77 result.extend(out.split('\n'))
78
79 return result
80
81
82 def autodoc_skip_member(
83 app,
84 what: str,
85 name: str,
86 obj: Any,
87 skip: bool,
88 options,
89 ) -> bool:
90 """Public members already kept. Also include members marked as documented.
91 """
92 # Never skip if explicitly whitelisted.
93 if id(obj) in _doc.RECORDED_CONST_DOCS:
94 return False
95 # Skip all private methods.
96 if name.startswith('_'):
97 return True
98 # Fallback to default.
99 return skip
100
101
102 def autodoc_process(app, what: str, name: str, obj: Any, options,
103 lines: List[str]) -> None:
104 # Try to lookup in documented dictionary.
105 doc_string = _doc.RECORDED_CONST_DOCS.get(id(obj))
106 if name.startswith('cirq') and doc_string is not None:
107 # Override docstring if requested.
108 if doc_string is not None:
109 new_doc_string = inspect.cleandoc(doc_string)
110 lines[:] = new_doc_string.split('\n')
111 elif not (getattr(obj, '__module__', 'cirq') or '').startswith('cirq'):
112 # Don't convert objects from other modules.
113 return
114
115 # Don't convert output from Napoleon extension, which is already rst.
116 i = 0
117 while i < len(lines) and not lines[i].startswith(':'):
118 i += 1
119 if not i:
120 return
121
122 converted_lines = convert_markdown_mathjax_for_rst(lines[:i])
123 kept_lines = lines[i:]
124
125 data = pypandoc.convert(
126 '\n'.join(converted_lines),
127 to='rst',
128 format='markdown_github',
129 )
130
131 lines[:] = data.split('\n') + kept_lines
132
133
134 def source_read(app, docname, source):
135 source[0] = re.sub(r'"##### (Copyright 20\d\d The Cirq Developers)"',
136 r'"**\1**"', source[0])
137 source[0] = re.sub(r'"<table.*tfo-notebook-buttons.*"</table>"',
138 r'""',
139 source[0],
140 flags=re.S)
141
142
143 # -- Project information -----------------------------------------------------
144
145 project = 'Cirq'
146 copyright = '2018, The Cirq Developers' # pylint: disable=redefined-builtin
147 author = 'The Cirq Developers'
148
149 # The full version, including alpha/beta/rc tags
150 __version__ = ''
151 exec(open(os.path.join(cirq_root_path, 'cirq', '_version.py')).read())
152 release = __version__
153
154 # The short X.Y version
155 version = release # '.'.join(release.split('.')[:2])
156
157 # -- General configuration ---------------------------------------------------
158
159 # If your documentation needs a minimal Sphinx version, state it here.
160 # needs_sphinx = '1.0'
161
162 # Add any Sphinx extension module names here, as strings. They can be
163 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
164 # ones.
165 extensions = [
166 'myst_parser',
167 'nbsphinx',
168 'sphinx.ext.autodoc',
169 'sphinx.ext.autosummary',
170 'sphinx.ext.doctest',
171 'sphinx.ext.mathjax',
172 'sphinx.ext.napoleon',
173 'sphinx.ext.viewcode',
174 'sphinx_markdown_tables',
175 ]
176
177 # Add any paths that contain templates here, relative to this directory.
178 templates_path = ['_templates']
179
180 # Allow markdown includes.
181 # http://www.sphinx-doc.org/en/master/markdown.html
182 # The suffix(es) of source filenames.
183 # You can specify multiple suffix as a list of string:
184 #
185 source_suffix = {
186 '.rst': 'restructuredtext',
187 '.md': 'markdown',
188 }
189
190 # The master toctree document.
191 master_doc = 'index'
192
193 # The language for content autogenerated by Sphinx. Refer to documentation
194 # for a list of supported languages.
195 #
196 # This is also used if you do content translation via gettext catalogs.
197 # Usually you set "language" from the command line for these cases.
198 language = None
199
200 # List of patterns, relative to source directory, that match files and
201 # directories to ignore when looking for source files.
202 # This pattern also affects html_static_path and html_extra_path .
203 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
204
205 # The name of the Pygments (syntax highlighting) style to use.
206 pygments_style = 'sphinx'
207
208 # -- Options for HTML output ---------------------------------------------
209
210 html_theme = 'sphinx_rtd_theme'
211 html_favicon = 'favicon.ico'
212 # html_theme_options = {}
213
214 # Add any paths that contain custom static files (such as style sheets) here,
215 # relative to this directory. They are copied after the builtin static files,
216 # so a file named "default.css" will overwrite the builtin "default.css".
217 html_static_path = ['_static']
218
219 # Custom sidebar templates, must be a dictionary that maps document names
220 # to template names.
221 #
222 # The default sidebars (for documents that don't match any pattern) are
223 # defined by theme itself. Builtin themes are using these templates by
224 # default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
225 # 'searchbox.html']``.
226 #
227 # html_sidebars = {}
228
229 html_logo = 'docs/images/Cirq_logo_notext.png'
230 html_css_files = ['tweak-style.css']
231
232 # -- Options for HTMLHelp output -----------------------------------------
233
234 # Output file base name for HTML help builder.
235 htmlhelp_basename = 'Cirqdoc'
236
237 # -- Options for LaTeX output --------------------------------------------
238
239 latex_elements = {
240 # The paper size ('letterpaper' or 'a4paper').
241 'papersize': 'letterpaper',
242
243 # The font size ('10pt', '11pt' or '12pt').
244 # 'pointsize': '10pt',
245
246 # Additional stuff for the LaTeX preamble.
247 # 'preamble': '',
248
249 # Latex figure (float) alignment
250 # 'figure_align': 'htbp',
251 }
252
253 # Grouping the document tree into LaTeX files. List of tuples
254 # (source start file, target name, title,
255 # author, documentclass [howto, manual, or own class]).
256 latex_documents = [
257 (master_doc, 'Cirq.tex', 'Cirq Documentation', 'The Cirq Developers',
258 'manual'),
259 ]
260
261 # -- Options for manual page output --------------------------------------
262
263 # One entry per manual page. List of tuples
264 # (source start file, name, description, authors, manual section).
265 man_pages = [(master_doc, 'cirq', 'Cirq Documentation', [author], 1)]
266
267 # -- Options for Texinfo output ------------------------------------------
268
269 # Grouping the document tree into Texinfo files. List of tuples
270 # (source start file, target name, title, author,
271 # dir menu entry, description, category)
272 texinfo_documents = [
273 (master_doc, 'Cirq', 'Cirq Documentation', author, 'Cirq',
274 'A python library for NISQ circuits.', 'Miscellaneous'),
275 ]
276
277 # -- Extension configuration -------------------------------------------------
278
279 # Generate subpages for reference docs automatically.
280 # http://www.sphinx-doc.org/en/master/ext/autosummary.html#generating-stub-pages-automatically
281 autosummary_generate = True
282
283 # to resolve name clashes between the generated files
284 autosummary_filename_map = {"cirq.QFT": "cirq.QFT_deprecated"}
285
286 myst_update_mathjax = False
287
288 # To allow for google.colab temporarily in notebooks
289 # TODO: after https://github.com/quantumlib/Cirq/issues/3368 turn this back off
290 nbsphinx_allow_errors = True
291
[end of rtd_docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/rtd_docs/conf.py b/rtd_docs/conf.py
--- a/rtd_docs/conf.py
+++ b/rtd_docs/conf.py
@@ -132,8 +132,14 @@
def source_read(app, docname, source):
- source[0] = re.sub(r'"##### (Copyright 20\d\d The Cirq Developers)"',
- r'"**\1**"', source[0])
+ source[0] = re.sub(r'"##### (Copyright 20\d\d The Cirq Developers)"', r'""',
+ source[0])
+ source[0] = re.sub(
+ r'(\{\s*?"cell_type": "code".*?"#@title.*License.".*?\},)',
+ r'',
+ source[0],
+ flags=re.S)
+
source[0] = re.sub(r'"<table.*tfo-notebook-buttons.*"</table>"',
r'""',
source[0],
| {"golden_diff": "diff --git a/rtd_docs/conf.py b/rtd_docs/conf.py\n--- a/rtd_docs/conf.py\n+++ b/rtd_docs/conf.py\n@@ -132,8 +132,14 @@\n \n \n def source_read(app, docname, source):\n- source[0] = re.sub(r'\"##### (Copyright 20\\d\\d The Cirq Developers)\"',\n- r'\"**\\1**\"', source[0])\n+ source[0] = re.sub(r'\"##### (Copyright 20\\d\\d The Cirq Developers)\"', r'\"\"',\n+ source[0])\n+ source[0] = re.sub(\n+ r'(\\{\\s*?\"cell_type\": \"code\".*?\"#@title.*License.\".*?\\},)',\n+ r'',\n+ source[0],\n+ flags=re.S)\n+\n source[0] = re.sub(r'\"<table.*tfo-notebook-buttons.*\"</table>\"',\n r'\"\"',\n source[0],\n", "issue": "Read the docs has goofy copyright\n**Description of the issue**\r\nCopyright shows up on cirq tutorials on readthedocs.\r\n\r\n**How to reproduce the issue**\r\nGo to https://cirq.readthedocs.io/en/latest/docs/gates.html or other pages. Top part says \"Copyright 2020 The Cirq Developers\" with apache license.\r\n\r\n**Cirq version**\r\n0.9.1. It probably broke when we adjusted all the copyright notices.\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# coverage: ignore\n\n# The content for all documentation lives in ../docs. That folder is\n# following the structure for the Google Quantum site configured for the\n# internal CMS, devsite. The readthedocs layer is a secondary, which generates\n# the content using sphinx to readthedocs.io until we go live with the devsite.\n#\n# This is the configuration file for the Sphinx documentation builder.\n# See http://www.sphinx-doc.org/en/master/config for help\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport inspect\nimport re\nfrom typing import List, Any\n\nimport os\nimport sys\nimport shutil\n\nimport pypandoc\n\ncirq_root_path = os.path.dirname(os.path.dirname(__file__))\nsys.path.insert(0, cirq_root_path)\nfrom cirq import _doc\n\n\ndef setup(app):\n # just in case it exists (locally) remove the copied docs folder\n shutil.rmtree(\"./docs\", ignore_errors=True)\n # copy recursively the actual content from the devsite folder\n # to rtd_docs/docs\n shutil.copytree(src=\"../docs\", dst=\"./docs\")\n app.add_config_value('pandoc_use_parser', 'markdown', True)\n app.connect('autodoc-process-docstring', autodoc_process)\n app.connect('autodoc-skip-member', autodoc_skip_member)\n app.connect('source-read', source_read)\n\n\ndef convert_markdown_mathjax_for_rst(lines: List[str]) -> List[str]:\n if all('$$' not in line for line in lines):\n return lines\n\n data = '\\n'.join(lines)\n sections = data.split('$$')\n if len(sections) % 2 != 1:\n raise ValueError('Mismatched number of \"$$\" latex tokens.')\n\n result = []\n for i, s in enumerate(sections):\n if i % 2:\n # Avoid getting split across divs.\n s = ' '.join(s.split('\\n'))\n # Avoid intermediate layers turning our newlines into slashes.\n s = s.replace('\\\\\\\\', r'\\newline')\n # Turn latex like \"|x\\rangle\" into \"|x \\rangle\".\n # The extra space seems to be necessary to survive a later pass.\n s = re.sub(r'([a-zA-Z0-9])\\\\', r'\\1 \\\\', s)\n # Keep the $$ so MathJax can find it.\n result.append('$${}$$'.format(s))\n else:\n # Work around bad table detection in pandoc by concatenating\n # lines from the same paragraph.\n s = '\\n\\n'.join(e.replace('\\n', ' ') for e 
in s.split('\\n\\n'))\n\n # Convert markdown to rst.\n out = pypandoc.convert(s, to='rst', format='markdown_github')\n\n # Not sure why pandoc is escaping these...\n out = out.replace(r'\\|', '|')\n\n result.extend(out.split('\\n'))\n\n return result\n\n\ndef autodoc_skip_member(\n app,\n what: str,\n name: str,\n obj: Any,\n skip: bool,\n options,\n) -> bool:\n \"\"\"Public members already kept. Also include members marked as documented.\n \"\"\"\n # Never skip if explicitly whitelisted.\n if id(obj) in _doc.RECORDED_CONST_DOCS:\n return False\n # Skip all private methods.\n if name.startswith('_'):\n return True\n # Fallback to default.\n return skip\n\n\ndef autodoc_process(app, what: str, name: str, obj: Any, options,\n lines: List[str]) -> None:\n # Try to lookup in documented dictionary.\n doc_string = _doc.RECORDED_CONST_DOCS.get(id(obj))\n if name.startswith('cirq') and doc_string is not None:\n # Override docstring if requested.\n if doc_string is not None:\n new_doc_string = inspect.cleandoc(doc_string)\n lines[:] = new_doc_string.split('\\n')\n elif not (getattr(obj, '__module__', 'cirq') or '').startswith('cirq'):\n # Don't convert objects from other modules.\n return\n\n # Don't convert output from Napoleon extension, which is already rst.\n i = 0\n while i < len(lines) and not lines[i].startswith(':'):\n i += 1\n if not i:\n return\n\n converted_lines = convert_markdown_mathjax_for_rst(lines[:i])\n kept_lines = lines[i:]\n\n data = pypandoc.convert(\n '\\n'.join(converted_lines),\n to='rst',\n format='markdown_github',\n )\n\n lines[:] = data.split('\\n') + kept_lines\n\n\ndef source_read(app, docname, source):\n source[0] = re.sub(r'\"##### (Copyright 20\\d\\d The Cirq Developers)\"',\n r'\"**\\1**\"', source[0])\n source[0] = re.sub(r'\"<table.*tfo-notebook-buttons.*\"</table>\"',\n r'\"\"',\n source[0],\n flags=re.S)\n\n\n# -- Project information -----------------------------------------------------\n\nproject = 'Cirq'\ncopyright = '2018, The Cirq Developers' # pylint: disable=redefined-builtin\nauthor = 'The Cirq Developers'\n\n# The full version, including alpha/beta/rc tags\n__version__ = ''\nexec(open(os.path.join(cirq_root_path, 'cirq', '_version.py')).read())\nrelease = __version__\n\n# The short X.Y version\nversion = release # '.'.join(release.split('.')[:2])\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'myst_parser',\n 'nbsphinx',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.doctest',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.viewcode',\n 'sphinx_markdown_tables',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# Allow markdown includes.\n# http://www.sphinx-doc.org/en/master/markdown.html\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\nsource_suffix = {\n '.rst': 'restructuredtext',\n '.md': 'markdown',\n}\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# -- Options for HTML output ---------------------------------------------\n\nhtml_theme = 'sphinx_rtd_theme'\nhtml_favicon = 'favicon.ico'\n# html_theme_options = {}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\n# html_sidebars = {}\n\nhtml_logo = 'docs/images/Cirq_logo_notext.png'\nhtml_css_files = ['tweak-style.css']\n\n# -- Options for HTMLHelp output -----------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Cirqdoc'\n\n# -- Options for LaTeX output --------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n # 'preamble': '',\n\n # Latex figure (float) alignment\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'Cirq.tex', 'Cirq Documentation', 'The Cirq Developers',\n 'manual'),\n]\n\n# -- Options for manual page output --------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, 'cirq', 'Cirq Documentation', [author], 1)]\n\n# -- Options for Texinfo output ------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'Cirq', 'Cirq Documentation', author, 'Cirq',\n 'A python library for NISQ circuits.', 'Miscellaneous'),\n]\n\n# -- Extension configuration -------------------------------------------------\n\n# Generate subpages for reference docs automatically.\n# http://www.sphinx-doc.org/en/master/ext/autosummary.html#generating-stub-pages-automatically\nautosummary_generate = True\n\n# to resolve name clashes between the generated files\nautosummary_filename_map = {\"cirq.QFT\": \"cirq.QFT_deprecated\"}\n\nmyst_update_mathjax = False\n\n# To allow for google.colab temporarily in notebooks\n# TODO: after https://github.com/quantumlib/Cirq/issues/3368 turn this back off\nnbsphinx_allow_errors = True\n", "path": "rtd_docs/conf.py"}]} | 3,755 | 218 |
gh_patches_debug_16584 | rasdani/github-patches | git_diff | fossasia__open-event-server-6510 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cron job for marking event invoices due is wrong
PR https://github.com/fossasia/open-event-server/pull/6166 is wrong. The query fails in every case.
https://github.com/fossasia/open-event-server/blob/bc45c1d267b924b556d36855f29c1dd96e901209/app/api/helpers/scheduled_jobs.py#L164-L174
This line is not valid SQLAlchemy syntax:
https://github.com/fossasia/open-event-server/blob/bc45c1d267b924b556d36855f29c1dd96e901209/app/api/helpers/scheduled_jobs.py#L169
`Neither 'InstrumentedAttribute' object nor 'Comparator' object associated with EventInvoice.event has an attribute 'ends_at'`
It should be `db.session.query(EventInvoice).join(Event).filter(EventInvoice.status == 'upcoming', Event.ends_at >= datetime.datetime.now()).all()`
Each run of this task is failing.
Fix it and write tests to ensure it is working.
</issue>
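Following the query the issue suggests, a corrected job would correlate `EventInvoice` with `Event` inside `filter()` (a bulk `Query.update()` generally cannot follow an explicit `join()` in SQLAlchemy 1.x) and use `synchronize_session=False` for the bulk update. The sketch below reuses the imports already present in `scheduled_jobs.py` and is an illustration, not necessarily the exact patch:

```python
import datetime

from app.models import db
from app.models.event import Event
from app.models.event_invoice import EventInvoice


def event_invoices_mark_due():
    from app import current_app as app
    with app.app_context():
        db.session.query(EventInvoice).filter(
            EventInvoice.status == 'upcoming',
            Event.id == EventInvoice.event_id,  # correlate invoice with its event
            Event.ends_at >= datetime.datetime.now(),
            EventInvoice.created_at + datetime.timedelta(days=30)
            <= datetime.datetime.now(),
        ).update({EventInvoice.status: 'due'}, synchronize_session=False)
        db.session.commit()
```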
<code>
[start of app/api/helpers/scheduled_jobs.py]
1 import datetime
2
3 import pytz
4 from dateutil.relativedelta import relativedelta
5 from flask import render_template
6
7 from app.api.helpers.db import safe_query, save_to_db
8 from app.api.helpers.mail import send_email_after_event, send_email_for_monthly_fee_payment, \
9 send_followup_email_for_monthly_fee_payment
10 from app.api.helpers.notification import send_notif_monthly_fee_payment, send_followup_notif_monthly_fee_payment, \
11 send_notif_after_event
12 from app.api.helpers.query import get_upcoming_events, get_user_event_roles_by_role_name
13 from app.api.helpers.utilities import monthdelta
14 from app.api.helpers.files import create_save_pdf
15 from app.api.helpers.storage import UPLOAD_PATHS
16 from app.models import db
17 from app.models.event import Event
18 from app.models.event_invoice import EventInvoice
19 from app.models.order import Order
20 from app.models.speaker import Speaker
21 from app.models.session import Session
22 from app.models.ticket import Ticket
23 from app.models.ticket_fee import TicketFees, get_fee
24
25 from app.settings import get_settings
26
27
28 def send_after_event_mail():
29 from app import current_app as app
30 with app.app_context():
31 events = Event.query.filter_by(state='published', deleted_at=None).all()
32 upcoming_events = get_upcoming_events()
33 upcoming_event_links = "<ul>"
34 for upcoming_event in upcoming_events:
35 frontend_url = get_settings()['frontend_url']
36 upcoming_event_links += "<li><a href='{}/events/{}'>{}</a></li>" \
37 .format(frontend_url, upcoming_event.id, upcoming_event.name)
38 upcoming_event_links += "</ul>"
39 for event in events:
40 organizers = get_user_event_roles_by_role_name(event.id, 'organizer')
41 speakers = Speaker.query.filter_by(event_id=event.id, deleted_at=None).all()
42 owner = get_user_event_roles_by_role_name(event.id, 'owner').first()
43 current_time = datetime.datetime.now(pytz.timezone(event.timezone))
44 time_difference = current_time - event.ends_at
45 time_difference_minutes = (time_difference.days * 24 * 60) + \
46 (time_difference.seconds / 60)
47 if current_time > event.ends_at and time_difference_minutes < 1440:
48 for speaker in speakers:
49 if not speaker.is_email_overridden:
50 send_email_after_event(speaker.user.email, event.name, upcoming_event_links)
51 send_notif_after_event(speaker.user, event.name)
52 for organizer in organizers:
53 send_email_after_event(organizer.user.email, event.name, upcoming_event_links)
54 send_notif_after_event(organizer.user, event.name)
55 if owner:
56 send_email_after_event(owner.user.email, event.name, upcoming_event_links)
57 send_notif_after_event(owner.user, event.name)
58
59
60 def change_session_state_on_event_completion():
61 from app import current_app as app
62 with app.app_context():
63 sessions_to_be_changed = Session.query.join(Event).filter(Session.state == 'pending')\
64 .filter(Event.ends_at < datetime.datetime.now())
65 for session in sessions_to_be_changed:
66 session.state = 'rejected'
67 save_to_db(session, 'Changed {} session state to rejected'.format(session.title))
68
69
70 def send_event_fee_notification():
71 from app import current_app as app
72 with app.app_context():
73 events = Event.query.filter_by(deleted_at=None, state='published').all()
74 for event in events:
75 latest_invoice = EventInvoice.query.filter_by(
76 event_id=event.id).order_by(EventInvoice.created_at.desc()).first()
77
78 if latest_invoice:
79 orders = Order.query \
80 .filter_by(event_id=event.id) \
81 .filter_by(status='completed') \
82 .filter(Order.completed_at > latest_invoice.created_at).all()
83 else:
84 orders = Order.query.filter_by(
85 event_id=event.id).filter_by(status='completed').all()
86
87 fee_total = 0
88 for order in orders:
89 for order_ticket in order.tickets:
90 ticket = safe_query(db, Ticket, 'id', order_ticket.ticket_id, 'ticket_id')
91 if order.paid_via != 'free' and order.amount > 0 and ticket.price > 0:
92 fee = ticket.price * (get_fee(order.event.payment_currency) / 100.0)
93 fee_total += fee
94
95 if fee_total > 0:
96 owner = get_user_event_roles_by_role_name(event.id, 'owner').first()
97 new_invoice = EventInvoice(
98 amount=fee_total, event_id=event.id, user_id=owner.user.id)
99
100 if event.discount_code_id and event.discount_code:
101 r = relativedelta(datetime.utcnow(), event.created_at)
102 if r <= event.discount_code.valid_till:
103 new_invoice.amount = fee_total - \
104 (fee_total * (event.discount_code.value / 100.0))
105 new_invoice.discount_code_id = event.discount_code_id
106
107 save_to_db(new_invoice)
108 prev_month = monthdelta(new_invoice.created_at, 1).strftime(
109 "%b %Y") # Displayed as Aug 2016
110 app_name = get_settings()['app_name']
111 frontend_url = get_settings()['frontend_url']
112 link = '{}/invoices/{}'.format(frontend_url, new_invoice.identifier)
113 send_email_for_monthly_fee_payment(new_invoice.user.email,
114 event.name,
115 prev_month,
116 new_invoice.amount,
117 app_name,
118 link)
119 send_notif_monthly_fee_payment(new_invoice.user,
120 event.name,
121 prev_month,
122 new_invoice.amount,
123 app_name,
124 link,
125 new_invoice.event_id)
126
127
128 def send_event_fee_notification_followup():
129 from app import current_app as app
130 with app.app_context():
131 incomplete_invoices = EventInvoice.query.filter(EventInvoice.status != 'paid').all()
132 for incomplete_invoice in incomplete_invoices:
133 if incomplete_invoice.amount > 0:
134 prev_month = monthdelta(incomplete_invoice.created_at, 1).strftime(
135 "%b %Y") # Displayed as Aug 2016
136 app_name = get_settings()['app_name']
137 frontend_url = get_settings()['frontend_url']
138 link = '{}/event-invoice/{}/review'.format(frontend_url,
139 incomplete_invoice.identifier)
140 send_followup_email_for_monthly_fee_payment(incomplete_invoice.user.email,
141 incomplete_invoice.event.name,
142 prev_month,
143 incomplete_invoice.amount,
144 app_name,
145 link)
146 send_followup_notif_monthly_fee_payment(incomplete_invoice.user,
147 incomplete_invoice.event.name,
148 prev_month,
149 incomplete_invoice.amount,
150 app_name,
151 link,
152 incomplete_invoice.event.id)
153
154
155 def expire_pending_tickets():
156 from app import current_app as app
157 with app.app_context():
158 db.session.query(Order).filter(Order.status == 'pending',
159 (Order.created_at + datetime.timedelta(minutes=30)) <= datetime.datetime.now()).\
160 update({'status': 'expired'})
161 db.session.commit()
162
163
164 def event_invoices_mark_due():
165 from app import current_app as app
166 with app.app_context():
167 db.session.query(EventInvoice).\
168 filter(EventInvoice.status == 'upcoming',
169 EventInvoice.event.ends_at >= datetime.datetime.now(),
170 (EventInvoice.created_at + datetime.timedelta(days=30) <=
171 datetime.datetime.now())).\
172 update({'status': 'due'})
173
174 db.session.commit()
175
176
177 def send_monthly_event_invoice():
178 from app import current_app as app
179 with app.app_context():
180 events = Event.query.filter_by(deleted_at=None, state='published').all()
181 for event in events:
182 # calculate net & gross revenues
183 user = event.owner
184 admin_info = get_settings()
185 currency = event.payment_currency
186 ticket_fee_object = db.session.query(TicketFees).filter_by(currency=currency).one()
187 ticket_fee_percentage = ticket_fee_object.service_fee
188 ticket_fee_maximum = ticket_fee_object.maximum_fee
189 orders = Order.query.filter_by(event=event).all()
190 gross_revenue = event.calc_monthly_revenue()
191 ticket_fees = event.tickets_sold * (ticket_fee_percentage / 100)
192 if ticket_fees > ticket_fee_maximum:
193 ticket_fees = ticket_fee_maximum
194 net_revenue = gross_revenue - ticket_fees
195 payment_details = {
196 'tickets_sold': event.tickets_sold,
197 'gross_revenue': gross_revenue,
198 'net_revenue': net_revenue,
199 'amount_payable': ticket_fees
200 }
201 # save invoice as pdf
202 pdf = create_save_pdf(render_template('pdf/event_invoice.html', orders=orders, user=user,
203 admin_info=admin_info, currency=currency, event=event,
204 ticket_fee_object=ticket_fee_object, payment_details=payment_details,
205 net_revenue=net_revenue), UPLOAD_PATHS['pdf']['event_invoice'],
206 dir_path='/static/uploads/pdf/event_invoices/', identifier=event.identifier)
207 # save event_invoice info to DB
208
209 event_invoice = EventInvoice(amount=net_revenue, invoice_pdf_url=pdf, event_id=event.id)
210 save_to_db(event_invoice)
211
[end of app/api/helpers/scheduled_jobs.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/api/helpers/scheduled_jobs.py b/app/api/helpers/scheduled_jobs.py
--- a/app/api/helpers/scheduled_jobs.py
+++ b/app/api/helpers/scheduled_jobs.py
@@ -164,14 +164,12 @@
def event_invoices_mark_due():
from app import current_app as app
with app.app_context():
- db.session.query(EventInvoice).\
- filter(EventInvoice.status == 'upcoming',
- EventInvoice.event.ends_at >= datetime.datetime.now(),
- (EventInvoice.created_at + datetime.timedelta(days=30) <=
- datetime.datetime.now())).\
- update({'status': 'due'})
-
- db.session.commit()
+ db.session.query(EventInvoice).filter(
+ EventInvoice.status == 'upcoming',
+ Event.id == EventInvoice.event_id,
+ Event.ends_at >= datetime.datetime.now(),
+ (EventInvoice.created_at + datetime.timedelta(days=30) <= datetime.datetime.now())
+ ).update({EventInvoice.status: 'due'}, synchronize_session=False)
def send_monthly_event_invoice():
| {"golden_diff": "diff --git a/app/api/helpers/scheduled_jobs.py b/app/api/helpers/scheduled_jobs.py\n--- a/app/api/helpers/scheduled_jobs.py\n+++ b/app/api/helpers/scheduled_jobs.py\n@@ -164,14 +164,12 @@\n def event_invoices_mark_due():\n from app import current_app as app\n with app.app_context():\n- db.session.query(EventInvoice).\\\n- filter(EventInvoice.status == 'upcoming',\n- EventInvoice.event.ends_at >= datetime.datetime.now(),\n- (EventInvoice.created_at + datetime.timedelta(days=30) <=\n- datetime.datetime.now())).\\\n- update({'status': 'due'})\n-\n- db.session.commit()\n+ db.session.query(EventInvoice).filter(\n+ EventInvoice.status == 'upcoming',\n+ Event.id == EventInvoice.event_id,\n+ Event.ends_at >= datetime.datetime.now(),\n+ (EventInvoice.created_at + datetime.timedelta(days=30) <= datetime.datetime.now())\n+ ).update({EventInvoice.status: 'due'}, synchronize_session=False)\n \n \n def send_monthly_event_invoice():\n", "issue": "Cron job for marking event invoices due is wrong\nPR https://github.com/fossasia/open-event-server/pull/6166 is wrong. The query fails in every case\r\n\r\nhttps://github.com/fossasia/open-event-server/blob/bc45c1d267b924b556d36855f29c1dd96e901209/app/api/helpers/scheduled_jobs.py#L164-L174\r\n\r\nThis line is not valid SQLAlchemy syntax:\r\n\r\nhttps://github.com/fossasia/open-event-server/blob/bc45c1d267b924b556d36855f29c1dd96e901209/app/api/helpers/scheduled_jobs.py#L169\r\n\r\n`Neither 'InstrumentedAttribute' object nor 'Comparator' object associated with EventInvoice.event has an attribute 'ends_at'`\r\n\r\nIt should be `db.session.query(EventInvoice).join(Event).filter(EventInvoice.status == 'upcoming', Event.ends_at >= datetime.datetime.now()).all()`\r\n\r\nEach run of this task is failing\r\n\r\nFix it and write tests to ensure it is working\n", "before_files": [{"content": "import datetime\n\nimport pytz\nfrom dateutil.relativedelta import relativedelta\nfrom flask import render_template\n\nfrom app.api.helpers.db import safe_query, save_to_db\nfrom app.api.helpers.mail import send_email_after_event, send_email_for_monthly_fee_payment, \\\n send_followup_email_for_monthly_fee_payment\nfrom app.api.helpers.notification import send_notif_monthly_fee_payment, send_followup_notif_monthly_fee_payment, \\\n send_notif_after_event\nfrom app.api.helpers.query import get_upcoming_events, get_user_event_roles_by_role_name\nfrom app.api.helpers.utilities import monthdelta\nfrom app.api.helpers.files import create_save_pdf\nfrom app.api.helpers.storage import UPLOAD_PATHS\nfrom app.models import db\nfrom app.models.event import Event\nfrom app.models.event_invoice import EventInvoice\nfrom app.models.order import Order\nfrom app.models.speaker import Speaker\nfrom app.models.session import Session\nfrom app.models.ticket import Ticket\nfrom app.models.ticket_fee import TicketFees, get_fee\n\nfrom app.settings import get_settings\n\n\ndef send_after_event_mail():\n from app import current_app as app\n with app.app_context():\n events = Event.query.filter_by(state='published', deleted_at=None).all()\n upcoming_events = get_upcoming_events()\n upcoming_event_links = \"<ul>\"\n for upcoming_event in upcoming_events:\n frontend_url = get_settings()['frontend_url']\n upcoming_event_links += \"<li><a href='{}/events/{}'>{}</a></li>\" \\\n .format(frontend_url, upcoming_event.id, upcoming_event.name)\n upcoming_event_links += \"</ul>\"\n for event in events:\n organizers = get_user_event_roles_by_role_name(event.id, 'organizer')\n speakers = 
Speaker.query.filter_by(event_id=event.id, deleted_at=None).all()\n owner = get_user_event_roles_by_role_name(event.id, 'owner').first()\n current_time = datetime.datetime.now(pytz.timezone(event.timezone))\n time_difference = current_time - event.ends_at\n time_difference_minutes = (time_difference.days * 24 * 60) + \\\n (time_difference.seconds / 60)\n if current_time > event.ends_at and time_difference_minutes < 1440:\n for speaker in speakers:\n if not speaker.is_email_overridden:\n send_email_after_event(speaker.user.email, event.name, upcoming_event_links)\n send_notif_after_event(speaker.user, event.name)\n for organizer in organizers:\n send_email_after_event(organizer.user.email, event.name, upcoming_event_links)\n send_notif_after_event(organizer.user, event.name)\n if owner:\n send_email_after_event(owner.user.email, event.name, upcoming_event_links)\n send_notif_after_event(owner.user, event.name)\n\n\ndef change_session_state_on_event_completion():\n from app import current_app as app\n with app.app_context():\n sessions_to_be_changed = Session.query.join(Event).filter(Session.state == 'pending')\\\n .filter(Event.ends_at < datetime.datetime.now())\n for session in sessions_to_be_changed:\n session.state = 'rejected'\n save_to_db(session, 'Changed {} session state to rejected'.format(session.title))\n\n\ndef send_event_fee_notification():\n from app import current_app as app\n with app.app_context():\n events = Event.query.filter_by(deleted_at=None, state='published').all()\n for event in events:\n latest_invoice = EventInvoice.query.filter_by(\n event_id=event.id).order_by(EventInvoice.created_at.desc()).first()\n\n if latest_invoice:\n orders = Order.query \\\n .filter_by(event_id=event.id) \\\n .filter_by(status='completed') \\\n .filter(Order.completed_at > latest_invoice.created_at).all()\n else:\n orders = Order.query.filter_by(\n event_id=event.id).filter_by(status='completed').all()\n\n fee_total = 0\n for order in orders:\n for order_ticket in order.tickets:\n ticket = safe_query(db, Ticket, 'id', order_ticket.ticket_id, 'ticket_id')\n if order.paid_via != 'free' and order.amount > 0 and ticket.price > 0:\n fee = ticket.price * (get_fee(order.event.payment_currency) / 100.0)\n fee_total += fee\n\n if fee_total > 0:\n owner = get_user_event_roles_by_role_name(event.id, 'owner').first()\n new_invoice = EventInvoice(\n amount=fee_total, event_id=event.id, user_id=owner.user.id)\n\n if event.discount_code_id and event.discount_code:\n r = relativedelta(datetime.utcnow(), event.created_at)\n if r <= event.discount_code.valid_till:\n new_invoice.amount = fee_total - \\\n (fee_total * (event.discount_code.value / 100.0))\n new_invoice.discount_code_id = event.discount_code_id\n\n save_to_db(new_invoice)\n prev_month = monthdelta(new_invoice.created_at, 1).strftime(\n \"%b %Y\") # Displayed as Aug 2016\n app_name = get_settings()['app_name']\n frontend_url = get_settings()['frontend_url']\n link = '{}/invoices/{}'.format(frontend_url, new_invoice.identifier)\n send_email_for_monthly_fee_payment(new_invoice.user.email,\n event.name,\n prev_month,\n new_invoice.amount,\n app_name,\n link)\n send_notif_monthly_fee_payment(new_invoice.user,\n event.name,\n prev_month,\n new_invoice.amount,\n app_name,\n link,\n new_invoice.event_id)\n\n\ndef send_event_fee_notification_followup():\n from app import current_app as app\n with app.app_context():\n incomplete_invoices = EventInvoice.query.filter(EventInvoice.status != 'paid').all()\n for incomplete_invoice in incomplete_invoices:\n 
if incomplete_invoice.amount > 0:\n prev_month = monthdelta(incomplete_invoice.created_at, 1).strftime(\n \"%b %Y\") # Displayed as Aug 2016\n app_name = get_settings()['app_name']\n frontend_url = get_settings()['frontend_url']\n link = '{}/event-invoice/{}/review'.format(frontend_url,\n incomplete_invoice.identifier)\n send_followup_email_for_monthly_fee_payment(incomplete_invoice.user.email,\n incomplete_invoice.event.name,\n prev_month,\n incomplete_invoice.amount,\n app_name,\n link)\n send_followup_notif_monthly_fee_payment(incomplete_invoice.user,\n incomplete_invoice.event.name,\n prev_month,\n incomplete_invoice.amount,\n app_name,\n link,\n incomplete_invoice.event.id)\n\n\ndef expire_pending_tickets():\n from app import current_app as app\n with app.app_context():\n db.session.query(Order).filter(Order.status == 'pending',\n (Order.created_at + datetime.timedelta(minutes=30)) <= datetime.datetime.now()).\\\n update({'status': 'expired'})\n db.session.commit()\n\n\ndef event_invoices_mark_due():\n from app import current_app as app\n with app.app_context():\n db.session.query(EventInvoice).\\\n filter(EventInvoice.status == 'upcoming',\n EventInvoice.event.ends_at >= datetime.datetime.now(),\n (EventInvoice.created_at + datetime.timedelta(days=30) <=\n datetime.datetime.now())).\\\n update({'status': 'due'})\n\n db.session.commit()\n\n\ndef send_monthly_event_invoice():\n from app import current_app as app\n with app.app_context():\n events = Event.query.filter_by(deleted_at=None, state='published').all()\n for event in events:\n # calculate net & gross revenues\n user = event.owner\n admin_info = get_settings()\n currency = event.payment_currency\n ticket_fee_object = db.session.query(TicketFees).filter_by(currency=currency).one()\n ticket_fee_percentage = ticket_fee_object.service_fee\n ticket_fee_maximum = ticket_fee_object.maximum_fee\n orders = Order.query.filter_by(event=event).all()\n gross_revenue = event.calc_monthly_revenue()\n ticket_fees = event.tickets_sold * (ticket_fee_percentage / 100)\n if ticket_fees > ticket_fee_maximum:\n ticket_fees = ticket_fee_maximum\n net_revenue = gross_revenue - ticket_fees\n payment_details = {\n 'tickets_sold': event.tickets_sold,\n 'gross_revenue': gross_revenue,\n 'net_revenue': net_revenue,\n 'amount_payable': ticket_fees\n }\n # save invoice as pdf\n pdf = create_save_pdf(render_template('pdf/event_invoice.html', orders=orders, user=user,\n admin_info=admin_info, currency=currency, event=event,\n ticket_fee_object=ticket_fee_object, payment_details=payment_details,\n net_revenue=net_revenue), UPLOAD_PATHS['pdf']['event_invoice'],\n dir_path='/static/uploads/pdf/event_invoices/', identifier=event.identifier)\n # save event_invoice info to DB\n\n event_invoice = EventInvoice(amount=net_revenue, invoice_pdf_url=pdf, event_id=event.id)\n save_to_db(event_invoice)\n", "path": "app/api/helpers/scheduled_jobs.py"}]} | 3,260 | 234 |
gh_patches_debug_4799 | rasdani/github-patches | git_diff | quantumlib__Cirq-4319 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
JSON backwards compatibility has been flushed
https://github.com/quantumlib/Cirq/pull/4099 cleared some deprecated classes. Serialized files have a much longer lifespan than code. It was always the intent to keep much longer-term backwards compatibility for data files, especially experimental results which likely have `cirq.TrialResult` classes.
1) the old TrialResult.json should be kept around like the "inward" files
2) The resolver should re-direct "TrialResult" keys to `cirq.Result`. In some sense, this should have been done along with the initial rename.
</issue>
<code>
[start of cirq-core/cirq/json_resolver_cache.py]
1 # Copyright 2020 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import functools
16 from typing import Dict, TYPE_CHECKING
17
18 from cirq.protocols.json_serialization import ObjectFactory
19
20 if TYPE_CHECKING:
21 import cirq.ops.pauli_gates
22 import cirq.devices.unconstrained_device
23
24
25 @functools.lru_cache(maxsize=1)
26 def _class_resolver_dictionary() -> Dict[str, ObjectFactory]:
27 import cirq
28 from cirq.ops import raw_types
29 import pandas as pd
30 import numpy as np
31 from cirq.devices.noise_model import _NoNoiseModel
32 from cirq.experiments import CrossEntropyResult, CrossEntropyResultDict, GridInteractionLayer
33 from cirq.experiments.grid_parallel_two_qubit_xeb import GridParallelXEBMetadata
34
35 def _identity_operation_from_dict(qubits, **kwargs):
36 return cirq.identity_each(*qubits)
37
38 def single_qubit_matrix_gate(matrix):
39 if not isinstance(matrix, np.ndarray):
40 matrix = np.array(matrix, dtype=np.complex128)
41 return cirq.MatrixGate(matrix, qid_shape=(matrix.shape[0],))
42
43 def two_qubit_matrix_gate(matrix):
44 if not isinstance(matrix, np.ndarray):
45 matrix = np.array(matrix, dtype=np.complex128)
46 return cirq.MatrixGate(matrix, qid_shape=(2, 2))
47
48 import sympy
49
50 return {
51 'AmplitudeDampingChannel': cirq.AmplitudeDampingChannel,
52 'AsymmetricDepolarizingChannel': cirq.AsymmetricDepolarizingChannel,
53 'BitFlipChannel': cirq.BitFlipChannel,
54 'BitstringAccumulator': cirq.work.BitstringAccumulator,
55 'ProductState': cirq.ProductState,
56 'CCNotPowGate': cirq.CCNotPowGate,
57 'CCXPowGate': cirq.CCXPowGate,
58 'CCZPowGate': cirq.CCZPowGate,
59 'CNotPowGate': cirq.CNotPowGate,
60 'ControlledGate': cirq.ControlledGate,
61 'ControlledOperation': cirq.ControlledOperation,
62 'CSwapGate': cirq.CSwapGate,
63 'CXPowGate': cirq.CXPowGate,
64 'CZPowGate': cirq.CZPowGate,
65 'CrossEntropyResult': CrossEntropyResult,
66 'CrossEntropyResultDict': CrossEntropyResultDict,
67 'Circuit': cirq.Circuit,
68 'CircuitOperation': cirq.CircuitOperation,
69 'CliffordState': cirq.CliffordState,
70 'CliffordTableau': cirq.CliffordTableau,
71 'DepolarizingChannel': cirq.DepolarizingChannel,
72 'ConstantQubitNoiseModel': cirq.ConstantQubitNoiseModel,
73 'Duration': cirq.Duration,
74 'FrozenCircuit': cirq.FrozenCircuit,
75 'FSimGate': cirq.FSimGate,
76 'DensePauliString': cirq.DensePauliString,
77 'MutableDensePauliString': cirq.MutableDensePauliString,
78 'MutablePauliString': cirq.MutablePauliString,
79 'ObservableMeasuredResult': cirq.work.ObservableMeasuredResult,
80 'GateOperation': cirq.GateOperation,
81 'GeneralizedAmplitudeDampingChannel': cirq.GeneralizedAmplitudeDampingChannel,
82 'GlobalPhaseOperation': cirq.GlobalPhaseOperation,
83 'GridInteractionLayer': GridInteractionLayer,
84 'GridParallelXEBMetadata': GridParallelXEBMetadata,
85 'GridQid': cirq.GridQid,
86 'GridQubit': cirq.GridQubit,
87 'HPowGate': cirq.HPowGate,
88 'ISwapPowGate': cirq.ISwapPowGate,
89 'IdentityGate': cirq.IdentityGate,
90 'IdentityOperation': _identity_operation_from_dict,
91 'InitObsSetting': cirq.work.InitObsSetting,
92 'LinearDict': cirq.LinearDict,
93 'LineQubit': cirq.LineQubit,
94 'LineQid': cirq.LineQid,
95 'MatrixGate': cirq.MatrixGate,
96 'MeasurementKey': cirq.MeasurementKey,
97 'MeasurementGate': cirq.MeasurementGate,
98 '_MeasurementSpec': cirq.work._MeasurementSpec,
99 'Moment': cirq.Moment,
100 '_XEigenState': cirq.value.product_state._XEigenState, # type: ignore
101 '_YEigenState': cirq.value.product_state._YEigenState, # type: ignore
102 '_ZEigenState': cirq.value.product_state._ZEigenState, # type: ignore
103 '_NoNoiseModel': _NoNoiseModel,
104 'NamedQubit': cirq.NamedQubit,
105 'NamedQid': cirq.NamedQid,
106 'NoIdentifierQubit': cirq.testing.NoIdentifierQubit,
107 '_PauliX': cirq.ops.pauli_gates._PauliX,
108 '_PauliY': cirq.ops.pauli_gates._PauliY,
109 '_PauliZ': cirq.ops.pauli_gates._PauliZ,
110 'ParamResolver': cirq.ParamResolver,
111 'ParallelGateOperation': cirq.ParallelGateOperation,
112 'PauliString': cirq.PauliString,
113 'PhaseDampingChannel': cirq.PhaseDampingChannel,
114 'PhaseFlipChannel': cirq.PhaseFlipChannel,
115 'PhaseGradientGate': cirq.PhaseGradientGate,
116 'PhasedFSimGate': cirq.PhasedFSimGate,
117 'PhasedISwapPowGate': cirq.PhasedISwapPowGate,
118 'PhasedXPowGate': cirq.PhasedXPowGate,
119 'PhasedXZGate': cirq.PhasedXZGate,
120 'RandomGateChannel': cirq.RandomGateChannel,
121 'QuantumFourierTransformGate': cirq.QuantumFourierTransformGate,
122 'RepetitionsStoppingCriteria': cirq.work.RepetitionsStoppingCriteria,
123 'ResetChannel': cirq.ResetChannel,
124 'SingleQubitMatrixGate': single_qubit_matrix_gate,
125 'SingleQubitPauliStringGateOperation': cirq.SingleQubitPauliStringGateOperation,
126 'SingleQubitReadoutCalibrationResult': cirq.experiments.SingleQubitReadoutCalibrationResult,
127 'StabilizerStateChForm': cirq.StabilizerStateChForm,
128 'SwapPowGate': cirq.SwapPowGate,
129 'SymmetricalQidPair': cirq.SymmetricalQidPair,
130 'TaggedOperation': cirq.TaggedOperation,
131 'Result': cirq.Result,
132 'Rx': cirq.Rx,
133 'Ry': cirq.Ry,
134 'Rz': cirq.Rz,
135 'TwoQubitMatrixGate': two_qubit_matrix_gate,
136 '_UnconstrainedDevice': cirq.devices.unconstrained_device._UnconstrainedDevice,
137 'VarianceStoppingCriteria': cirq.work.VarianceStoppingCriteria,
138 'VirtualTag': cirq.VirtualTag,
139 'WaitGate': cirq.WaitGate,
140 '_QubitAsQid': raw_types._QubitAsQid,
141 # The formatter keeps putting this back
142 # pylint: disable=line-too-long
143 'XEBPhasedFSimCharacterizationOptions': cirq.experiments.XEBPhasedFSimCharacterizationOptions,
144 # pylint: enable=line-too-long
145 'XPowGate': cirq.XPowGate,
146 'XXPowGate': cirq.XXPowGate,
147 'YPowGate': cirq.YPowGate,
148 'YYPowGate': cirq.YYPowGate,
149 'ZPowGate': cirq.ZPowGate,
150 'ZZPowGate': cirq.ZZPowGate,
151 # not a cirq class, but treated as one:
152 'pandas.DataFrame': pd.DataFrame,
153 'pandas.Index': pd.Index,
154 'pandas.MultiIndex': pd.MultiIndex.from_tuples,
155 'sympy.Symbol': sympy.Symbol,
156 'sympy.Add': lambda args: sympy.Add(*args),
157 'sympy.Mul': lambda args: sympy.Mul(*args),
158 'sympy.Pow': lambda args: sympy.Pow(*args),
159 'sympy.Float': lambda approx: sympy.Float(approx),
160 'sympy.Integer': sympy.Integer,
161 'sympy.Rational': sympy.Rational,
162 'sympy.pi': lambda: sympy.pi,
163 'sympy.E': lambda: sympy.E,
164 'sympy.EulerGamma': lambda: sympy.EulerGamma,
165 'complex': complex,
166 }
167
[end of cirq-core/cirq/json_resolver_cache.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cirq-core/cirq/json_resolver_cache.py b/cirq-core/cirq/json_resolver_cache.py
--- a/cirq-core/cirq/json_resolver_cache.py
+++ b/cirq-core/cirq/json_resolver_cache.py
@@ -128,6 +128,7 @@
'SwapPowGate': cirq.SwapPowGate,
'SymmetricalQidPair': cirq.SymmetricalQidPair,
'TaggedOperation': cirq.TaggedOperation,
+ 'TrialResult': cirq.Result, # keep support for Cirq < 0.11.
'Result': cirq.Result,
'Rx': cirq.Rx,
'Ry': cirq.Ry,
| {"golden_diff": "diff --git a/cirq-core/cirq/json_resolver_cache.py b/cirq-core/cirq/json_resolver_cache.py\n--- a/cirq-core/cirq/json_resolver_cache.py\n+++ b/cirq-core/cirq/json_resolver_cache.py\n@@ -128,6 +128,7 @@\n 'SwapPowGate': cirq.SwapPowGate,\n 'SymmetricalQidPair': cirq.SymmetricalQidPair,\n 'TaggedOperation': cirq.TaggedOperation,\n+ 'TrialResult': cirq.Result, # keep support for Cirq < 0.11.\n 'Result': cirq.Result,\n 'Rx': cirq.Rx,\n 'Ry': cirq.Ry,\n", "issue": "JSON backwards compatibility has been flushed\nhttps://github.com/quantumlib/Cirq/pull/4099 cleared some deprecated classes. Serialized files have a much longer lifespan than code. It was always the intent to keep much longer-term backwards compatibility for data files, especially experimental results which likely have `cirq.TrialResult` classes.\r\n\r\n1) the old TrialResult.json should be kept around like the \"inward\" files \r\n2) The resolver should re-direct \"TrialResult\" keys to `cirq.Result`. In some sense, this should have been done along with the initial rename. \n", "before_files": [{"content": "# Copyright 2020 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport functools\nfrom typing import Dict, TYPE_CHECKING\n\nfrom cirq.protocols.json_serialization import ObjectFactory\n\nif TYPE_CHECKING:\n import cirq.ops.pauli_gates\n import cirq.devices.unconstrained_device\n\n\[email protected]_cache(maxsize=1)\ndef _class_resolver_dictionary() -> Dict[str, ObjectFactory]:\n import cirq\n from cirq.ops import raw_types\n import pandas as pd\n import numpy as np\n from cirq.devices.noise_model import _NoNoiseModel\n from cirq.experiments import CrossEntropyResult, CrossEntropyResultDict, GridInteractionLayer\n from cirq.experiments.grid_parallel_two_qubit_xeb import GridParallelXEBMetadata\n\n def _identity_operation_from_dict(qubits, **kwargs):\n return cirq.identity_each(*qubits)\n\n def single_qubit_matrix_gate(matrix):\n if not isinstance(matrix, np.ndarray):\n matrix = np.array(matrix, dtype=np.complex128)\n return cirq.MatrixGate(matrix, qid_shape=(matrix.shape[0],))\n\n def two_qubit_matrix_gate(matrix):\n if not isinstance(matrix, np.ndarray):\n matrix = np.array(matrix, dtype=np.complex128)\n return cirq.MatrixGate(matrix, qid_shape=(2, 2))\n\n import sympy\n\n return {\n 'AmplitudeDampingChannel': cirq.AmplitudeDampingChannel,\n 'AsymmetricDepolarizingChannel': cirq.AsymmetricDepolarizingChannel,\n 'BitFlipChannel': cirq.BitFlipChannel,\n 'BitstringAccumulator': cirq.work.BitstringAccumulator,\n 'ProductState': cirq.ProductState,\n 'CCNotPowGate': cirq.CCNotPowGate,\n 'CCXPowGate': cirq.CCXPowGate,\n 'CCZPowGate': cirq.CCZPowGate,\n 'CNotPowGate': cirq.CNotPowGate,\n 'ControlledGate': cirq.ControlledGate,\n 'ControlledOperation': cirq.ControlledOperation,\n 'CSwapGate': cirq.CSwapGate,\n 'CXPowGate': cirq.CXPowGate,\n 'CZPowGate': cirq.CZPowGate,\n 'CrossEntropyResult': CrossEntropyResult,\n 'CrossEntropyResultDict': CrossEntropyResultDict,\n 'Circuit': cirq.Circuit,\n 
'CircuitOperation': cirq.CircuitOperation,\n 'CliffordState': cirq.CliffordState,\n 'CliffordTableau': cirq.CliffordTableau,\n 'DepolarizingChannel': cirq.DepolarizingChannel,\n 'ConstantQubitNoiseModel': cirq.ConstantQubitNoiseModel,\n 'Duration': cirq.Duration,\n 'FrozenCircuit': cirq.FrozenCircuit,\n 'FSimGate': cirq.FSimGate,\n 'DensePauliString': cirq.DensePauliString,\n 'MutableDensePauliString': cirq.MutableDensePauliString,\n 'MutablePauliString': cirq.MutablePauliString,\n 'ObservableMeasuredResult': cirq.work.ObservableMeasuredResult,\n 'GateOperation': cirq.GateOperation,\n 'GeneralizedAmplitudeDampingChannel': cirq.GeneralizedAmplitudeDampingChannel,\n 'GlobalPhaseOperation': cirq.GlobalPhaseOperation,\n 'GridInteractionLayer': GridInteractionLayer,\n 'GridParallelXEBMetadata': GridParallelXEBMetadata,\n 'GridQid': cirq.GridQid,\n 'GridQubit': cirq.GridQubit,\n 'HPowGate': cirq.HPowGate,\n 'ISwapPowGate': cirq.ISwapPowGate,\n 'IdentityGate': cirq.IdentityGate,\n 'IdentityOperation': _identity_operation_from_dict,\n 'InitObsSetting': cirq.work.InitObsSetting,\n 'LinearDict': cirq.LinearDict,\n 'LineQubit': cirq.LineQubit,\n 'LineQid': cirq.LineQid,\n 'MatrixGate': cirq.MatrixGate,\n 'MeasurementKey': cirq.MeasurementKey,\n 'MeasurementGate': cirq.MeasurementGate,\n '_MeasurementSpec': cirq.work._MeasurementSpec,\n 'Moment': cirq.Moment,\n '_XEigenState': cirq.value.product_state._XEigenState, # type: ignore\n '_YEigenState': cirq.value.product_state._YEigenState, # type: ignore\n '_ZEigenState': cirq.value.product_state._ZEigenState, # type: ignore\n '_NoNoiseModel': _NoNoiseModel,\n 'NamedQubit': cirq.NamedQubit,\n 'NamedQid': cirq.NamedQid,\n 'NoIdentifierQubit': cirq.testing.NoIdentifierQubit,\n '_PauliX': cirq.ops.pauli_gates._PauliX,\n '_PauliY': cirq.ops.pauli_gates._PauliY,\n '_PauliZ': cirq.ops.pauli_gates._PauliZ,\n 'ParamResolver': cirq.ParamResolver,\n 'ParallelGateOperation': cirq.ParallelGateOperation,\n 'PauliString': cirq.PauliString,\n 'PhaseDampingChannel': cirq.PhaseDampingChannel,\n 'PhaseFlipChannel': cirq.PhaseFlipChannel,\n 'PhaseGradientGate': cirq.PhaseGradientGate,\n 'PhasedFSimGate': cirq.PhasedFSimGate,\n 'PhasedISwapPowGate': cirq.PhasedISwapPowGate,\n 'PhasedXPowGate': cirq.PhasedXPowGate,\n 'PhasedXZGate': cirq.PhasedXZGate,\n 'RandomGateChannel': cirq.RandomGateChannel,\n 'QuantumFourierTransformGate': cirq.QuantumFourierTransformGate,\n 'RepetitionsStoppingCriteria': cirq.work.RepetitionsStoppingCriteria,\n 'ResetChannel': cirq.ResetChannel,\n 'SingleQubitMatrixGate': single_qubit_matrix_gate,\n 'SingleQubitPauliStringGateOperation': cirq.SingleQubitPauliStringGateOperation,\n 'SingleQubitReadoutCalibrationResult': cirq.experiments.SingleQubitReadoutCalibrationResult,\n 'StabilizerStateChForm': cirq.StabilizerStateChForm,\n 'SwapPowGate': cirq.SwapPowGate,\n 'SymmetricalQidPair': cirq.SymmetricalQidPair,\n 'TaggedOperation': cirq.TaggedOperation,\n 'Result': cirq.Result,\n 'Rx': cirq.Rx,\n 'Ry': cirq.Ry,\n 'Rz': cirq.Rz,\n 'TwoQubitMatrixGate': two_qubit_matrix_gate,\n '_UnconstrainedDevice': cirq.devices.unconstrained_device._UnconstrainedDevice,\n 'VarianceStoppingCriteria': cirq.work.VarianceStoppingCriteria,\n 'VirtualTag': cirq.VirtualTag,\n 'WaitGate': cirq.WaitGate,\n '_QubitAsQid': raw_types._QubitAsQid,\n # The formatter keeps putting this back\n # pylint: disable=line-too-long\n 'XEBPhasedFSimCharacterizationOptions': cirq.experiments.XEBPhasedFSimCharacterizationOptions,\n # pylint: enable=line-too-long\n 'XPowGate': cirq.XPowGate,\n 
'XXPowGate': cirq.XXPowGate,\n 'YPowGate': cirq.YPowGate,\n 'YYPowGate': cirq.YYPowGate,\n 'ZPowGate': cirq.ZPowGate,\n 'ZZPowGate': cirq.ZZPowGate,\n # not a cirq class, but treated as one:\n 'pandas.DataFrame': pd.DataFrame,\n 'pandas.Index': pd.Index,\n 'pandas.MultiIndex': pd.MultiIndex.from_tuples,\n 'sympy.Symbol': sympy.Symbol,\n 'sympy.Add': lambda args: sympy.Add(*args),\n 'sympy.Mul': lambda args: sympy.Mul(*args),\n 'sympy.Pow': lambda args: sympy.Pow(*args),\n 'sympy.Float': lambda approx: sympy.Float(approx),\n 'sympy.Integer': sympy.Integer,\n 'sympy.Rational': sympy.Rational,\n 'sympy.pi': lambda: sympy.pi,\n 'sympy.E': lambda: sympy.E,\n 'sympy.EulerGamma': lambda: sympy.EulerGamma,\n 'complex': complex,\n }\n", "path": "cirq-core/cirq/json_resolver_cache.py"}]} | 3,060 | 155 |
gh_patches_debug_4528 | rasdani/github-patches | git_diff | ansible-collections__community.general-2239 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Examples of nomad_job_info not working
### Summary
The examples for nomad_job_info are using the module nomad_job instead of nomad_job_info.
### Issue Type
Documentation Report
### Component Name
nomad_job_info.py
### Ansible Version
```console (paste below)
$ ansible --version
```
### Configuration
```console (paste below)
$ ansible-config dump --only-changed
```
### OS / Environment
_No response_
### Additional Information
Example:
- name: List Nomad jobs
community.general.nomad_job:
host: localhost
register: result
Instead of:
- name: List Nomad jobs
community.general.nomad_job_info:
host: localhost
register: result
### Code of Conduct
I agree to follow the Ansible Code of Conduct
</issue>
<code>
[start of plugins/modules/clustering/nomad/nomad_job_info.py]
1 #!/usr/bin/python
2 # coding: utf-8 -*-
3
4 # (c) 2020, FERREIRA Christophe <[email protected]>
5 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
6
7 from __future__ import absolute_import, division, print_function
8 __metaclass__ = type
9
10 DOCUMENTATION = '''
11 ---
12 module: nomad_job_info
13 author: FERREIRA Christophe (@chris93111)
14 version_added: "1.3.0"
15 short_description: Get Nomad Jobs info
16 description:
17 - Get info for one Nomad job.
18 - List Nomad jobs.
19 requirements:
20 - python-nomad
21 extends_documentation_fragment:
22 - community.general.nomad
23 options:
24 name:
25 description:
26 - Name of job for Get info.
27 - If not specified, lists all jobs.
28 type: str
29 notes:
30 - C(check_mode) is supported.
31 seealso:
32 - name: Nomad jobs documentation
33 description: Complete documentation for Nomad API jobs.
34 link: https://www.nomadproject.io/api-docs/jobs/
35 '''
36
37 EXAMPLES = '''
38 - name: Get info for job awx
39 community.general.nomad_job:
40 host: localhost
41 name: awx
42 register: result
43
44 - name: List Nomad jobs
45 community.general.nomad_job:
46 host: localhost
47 register: result
48
49 '''
50
51 RETURN = '''
52 result:
53 description: List with dictionary contains jobs info
54 returned: success
55 type: list
56 sample: [
57 {
58 "Affinities": null,
59 "AllAtOnce": false,
60 "Constraints": null,
61 "ConsulToken": "",
62 "CreateIndex": 13,
63 "Datacenters": [
64 "dc1"
65 ],
66 "Dispatched": false,
67 "ID": "example",
68 "JobModifyIndex": 13,
69 "Meta": null,
70 "ModifyIndex": 13,
71 "Multiregion": null,
72 "Name": "example",
73 "Namespace": "default",
74 "NomadTokenID": "",
75 "ParameterizedJob": null,
76 "ParentID": "",
77 "Payload": null,
78 "Periodic": null,
79 "Priority": 50,
80 "Region": "global",
81 "Spreads": null,
82 "Stable": false,
83 "Status": "pending",
84 "StatusDescription": "",
85 "Stop": false,
86 "SubmitTime": 1602244370615307000,
87 "TaskGroups": [
88 {
89 "Affinities": null,
90 "Constraints": null,
91 "Count": 1,
92 "EphemeralDisk": {
93 "Migrate": false,
94 "SizeMB": 300,
95 "Sticky": false
96 },
97 "Meta": null,
98 "Migrate": {
99 "HealthCheck": "checks",
100 "HealthyDeadline": 300000000000,
101 "MaxParallel": 1,
102 "MinHealthyTime": 10000000000
103 },
104 "Name": "cache",
105 "Networks": null,
106 "ReschedulePolicy": {
107 "Attempts": 0,
108 "Delay": 30000000000,
109 "DelayFunction": "exponential",
110 "Interval": 0,
111 "MaxDelay": 3600000000000,
112 "Unlimited": true
113 },
114 "RestartPolicy": {
115 "Attempts": 3,
116 "Delay": 15000000000,
117 "Interval": 1800000000000,
118 "Mode": "fail"
119 },
120 "Scaling": null,
121 "Services": null,
122 "ShutdownDelay": null,
123 "Spreads": null,
124 "StopAfterClientDisconnect": null,
125 "Tasks": [
126 {
127 "Affinities": null,
128 "Artifacts": null,
129 "CSIPluginConfig": null,
130 "Config": {
131 "image": "redis:3.2",
132 "port_map": [
133 {
134 "db": 6379.0
135 }
136 ]
137 },
138 "Constraints": null,
139 "DispatchPayload": null,
140 "Driver": "docker",
141 "Env": null,
142 "KillSignal": "",
143 "KillTimeout": 5000000000,
144 "Kind": "",
145 "Leader": false,
146 "Lifecycle": null,
147 "LogConfig": {
148 "MaxFileSizeMB": 10,
149 "MaxFiles": 10
150 },
151 "Meta": null,
152 "Name": "redis",
153 "Resources": {
154 "CPU": 500,
155 "Devices": null,
156 "DiskMB": 0,
157 "IOPS": 0,
158 "MemoryMB": 256,
159 "Networks": [
160 {
161 "CIDR": "",
162 "DNS": null,
163 "Device": "",
164 "DynamicPorts": [
165 {
166 "HostNetwork": "default",
167 "Label": "db",
168 "To": 0,
169 "Value": 0
170 }
171 ],
172 "IP": "",
173 "MBits": 10,
174 "Mode": "",
175 "ReservedPorts": null
176 }
177 ]
178 },
179 "RestartPolicy": {
180 "Attempts": 3,
181 "Delay": 15000000000,
182 "Interval": 1800000000000,
183 "Mode": "fail"
184 },
185 "Services": [
186 {
187 "AddressMode": "auto",
188 "CanaryMeta": null,
189 "CanaryTags": null,
190 "Checks": [
191 {
192 "AddressMode": "",
193 "Args": null,
194 "CheckRestart": null,
195 "Command": "",
196 "Expose": false,
197 "FailuresBeforeCritical": 0,
198 "GRPCService": "",
199 "GRPCUseTLS": false,
200 "Header": null,
201 "InitialStatus": "",
202 "Interval": 10000000000,
203 "Method": "",
204 "Name": "alive",
205 "Path": "",
206 "PortLabel": "",
207 "Protocol": "",
208 "SuccessBeforePassing": 0,
209 "TLSSkipVerify": false,
210 "TaskName": "",
211 "Timeout": 2000000000,
212 "Type": "tcp"
213 }
214 ],
215 "Connect": null,
216 "EnableTagOverride": false,
217 "Meta": null,
218 "Name": "redis-cache",
219 "PortLabel": "db",
220 "Tags": [
221 "global",
222 "cache"
223 ],
224 "TaskName": ""
225 }
226 ],
227 "ShutdownDelay": 0,
228 "Templates": null,
229 "User": "",
230 "Vault": null,
231 "VolumeMounts": null
232 }
233 ],
234 "Update": {
235 "AutoPromote": false,
236 "AutoRevert": false,
237 "Canary": 0,
238 "HealthCheck": "checks",
239 "HealthyDeadline": 180000000000,
240 "MaxParallel": 1,
241 "MinHealthyTime": 10000000000,
242 "ProgressDeadline": 600000000000,
243 "Stagger": 30000000000
244 },
245 "Volumes": null
246 }
247 ],
248 "Type": "service",
249 "Update": {
250 "AutoPromote": false,
251 "AutoRevert": false,
252 "Canary": 0,
253 "HealthCheck": "",
254 "HealthyDeadline": 0,
255 "MaxParallel": 1,
256 "MinHealthyTime": 0,
257 "ProgressDeadline": 0,
258 "Stagger": 30000000000
259 },
260 "VaultNamespace": "",
261 "VaultToken": "",
262 "Version": 0
263 }
264 ]
265
266 '''
267
268
269 import os
270 import json
271
272 from ansible.module_utils.basic import AnsibleModule, missing_required_lib
273 from ansible.module_utils._text import to_native
274
275 import_nomad = None
276 try:
277 import nomad
278 import_nomad = True
279 except ImportError:
280 import_nomad = False
281
282
283 def run():
284 module = AnsibleModule(
285 argument_spec=dict(
286 host=dict(required=True, type='str'),
287 use_ssl=dict(type='bool', default=True),
288 timeout=dict(type='int', default=5),
289 validate_certs=dict(type='bool', default=True),
290 client_cert=dict(type='path', default=None),
291 client_key=dict(type='path', default=None),
292 namespace=dict(type='str', default=None),
293 name=dict(type='str', default=None),
294 token=dict(type='str', default=None, no_log=True)
295 ),
296 supports_check_mode=True
297 )
298
299 if not import_nomad:
300 module.fail_json(msg=missing_required_lib("python-nomad"))
301
302 certificate_ssl = (module.params.get('client_cert'), module.params.get('client_key'))
303
304 nomad_client = nomad.Nomad(
305 host=module.params.get('host'),
306 secure=module.params.get('use_ssl'),
307 timeout=module.params.get('timeout'),
308 verify=module.params.get('validate_certs'),
309 cert=certificate_ssl,
310 namespace=module.params.get('namespace'),
311 token=module.params.get('token')
312 )
313
314 changed = False
315 result = list()
316 try:
317 job_list = nomad_client.jobs.get_jobs()
318 for job in job_list:
319 result.append(nomad_client.job.get_job(job.get('ID')))
320 except Exception as e:
321 module.fail_json(msg=to_native(e))
322
323 if module.params.get('name'):
324 filter = list()
325 try:
326 for job in result:
327 if job.get('ID') == module.params.get('name'):
328 filter.append(job)
329 result = filter
330 if not filter:
331 module.fail_json(msg="Couldn't find Job with id " + str(module.params.get('name')))
332 except Exception as e:
333 module.fail_json(msg=to_native(e))
334
335 module.exit_json(changed=changed, result=result)
336
337
338 def main():
339
340 run()
341
342
343 if __name__ == "__main__":
344 main()
345
[end of plugins/modules/clustering/nomad/nomad_job_info.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/plugins/modules/clustering/nomad/nomad_job_info.py b/plugins/modules/clustering/nomad/nomad_job_info.py
--- a/plugins/modules/clustering/nomad/nomad_job_info.py
+++ b/plugins/modules/clustering/nomad/nomad_job_info.py
@@ -36,13 +36,13 @@
EXAMPLES = '''
- name: Get info for job awx
- community.general.nomad_job:
+ community.general.nomad_job_info:
host: localhost
name: awx
register: result
- name: List Nomad jobs
- community.general.nomad_job:
+ community.general.nomad_job_info:
host: localhost
register: result
| {"golden_diff": "diff --git a/plugins/modules/clustering/nomad/nomad_job_info.py b/plugins/modules/clustering/nomad/nomad_job_info.py\n--- a/plugins/modules/clustering/nomad/nomad_job_info.py\n+++ b/plugins/modules/clustering/nomad/nomad_job_info.py\n@@ -36,13 +36,13 @@\n \n EXAMPLES = '''\n - name: Get info for job awx\n- community.general.nomad_job:\n+ community.general.nomad_job_info:\n host: localhost\n name: awx\n register: result\n \n - name: List Nomad jobs\n- community.general.nomad_job:\n+ community.general.nomad_job_info:\n host: localhost\n register: result\n", "issue": "Examples of nomad_job_info not working\n### Summary\n\nThe examples for nomad_job_info are using the module nomad_job instead of nomad_job_info.\n\n### Issue Type\n\nDocumentation Report\n\n### Component Name\n\nnomad_job_info.py\n\n### Ansible Version\n\n```console (paste below)\r\n$ ansible --version\r\n\r\n```\r\n\n\n### Configuration\n\n```console (paste below)\r\n$ ansible-config dump --only-changed\r\n\r\n```\r\n\n\n### OS / Environment\n\n_No response_\n\n### Additional Information\n\nExample:\r\n- name: List Nomad jobs\r\n community.general.nomad_job:\r\n host: localhost\r\n register: result\r\n\r\nInstead of:\r\n- name: List Nomad jobs\r\n community.general.nomad_job_info:\r\n host: localhost\r\n register: result\r\n\n\n### Code of Conduct\n\nI agree to follow the Ansible Code of Conduct\n", "before_files": [{"content": "#!/usr/bin/python\n# coding: utf-8 -*-\n\n# (c) 2020, FERREIRA Christophe <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nDOCUMENTATION = '''\n---\nmodule: nomad_job_info\nauthor: FERREIRA Christophe (@chris93111)\nversion_added: \"1.3.0\"\nshort_description: Get Nomad Jobs info\ndescription:\n - Get info for one Nomad job.\n - List Nomad jobs.\nrequirements:\n - python-nomad\nextends_documentation_fragment:\n - community.general.nomad\noptions:\n name:\n description:\n - Name of job for Get info.\n - If not specified, lists all jobs.\n type: str\nnotes:\n - C(check_mode) is supported.\nseealso:\n - name: Nomad jobs documentation\n description: Complete documentation for Nomad API jobs.\n link: https://www.nomadproject.io/api-docs/jobs/\n'''\n\nEXAMPLES = '''\n- name: Get info for job awx\n community.general.nomad_job:\n host: localhost\n name: awx\n register: result\n\n- name: List Nomad jobs\n community.general.nomad_job:\n host: localhost\n register: result\n\n'''\n\nRETURN = '''\nresult:\n description: List with dictionary contains jobs info\n returned: success\n type: list\n sample: [\n {\n \"Affinities\": null,\n \"AllAtOnce\": false,\n \"Constraints\": null,\n \"ConsulToken\": \"\",\n \"CreateIndex\": 13,\n \"Datacenters\": [\n \"dc1\"\n ],\n \"Dispatched\": false,\n \"ID\": \"example\",\n \"JobModifyIndex\": 13,\n \"Meta\": null,\n \"ModifyIndex\": 13,\n \"Multiregion\": null,\n \"Name\": \"example\",\n \"Namespace\": \"default\",\n \"NomadTokenID\": \"\",\n \"ParameterizedJob\": null,\n \"ParentID\": \"\",\n \"Payload\": null,\n \"Periodic\": null,\n \"Priority\": 50,\n \"Region\": \"global\",\n \"Spreads\": null,\n \"Stable\": false,\n \"Status\": \"pending\",\n \"StatusDescription\": \"\",\n \"Stop\": false,\n \"SubmitTime\": 1602244370615307000,\n \"TaskGroups\": [\n {\n \"Affinities\": null,\n \"Constraints\": null,\n \"Count\": 1,\n \"EphemeralDisk\": {\n \"Migrate\": false,\n \"SizeMB\": 300,\n \"Sticky\": false\n 
},\n \"Meta\": null,\n \"Migrate\": {\n \"HealthCheck\": \"checks\",\n \"HealthyDeadline\": 300000000000,\n \"MaxParallel\": 1,\n \"MinHealthyTime\": 10000000000\n },\n \"Name\": \"cache\",\n \"Networks\": null,\n \"ReschedulePolicy\": {\n \"Attempts\": 0,\n \"Delay\": 30000000000,\n \"DelayFunction\": \"exponential\",\n \"Interval\": 0,\n \"MaxDelay\": 3600000000000,\n \"Unlimited\": true\n },\n \"RestartPolicy\": {\n \"Attempts\": 3,\n \"Delay\": 15000000000,\n \"Interval\": 1800000000000,\n \"Mode\": \"fail\"\n },\n \"Scaling\": null,\n \"Services\": null,\n \"ShutdownDelay\": null,\n \"Spreads\": null,\n \"StopAfterClientDisconnect\": null,\n \"Tasks\": [\n {\n \"Affinities\": null,\n \"Artifacts\": null,\n \"CSIPluginConfig\": null,\n \"Config\": {\n \"image\": \"redis:3.2\",\n \"port_map\": [\n {\n \"db\": 6379.0\n }\n ]\n },\n \"Constraints\": null,\n \"DispatchPayload\": null,\n \"Driver\": \"docker\",\n \"Env\": null,\n \"KillSignal\": \"\",\n \"KillTimeout\": 5000000000,\n \"Kind\": \"\",\n \"Leader\": false,\n \"Lifecycle\": null,\n \"LogConfig\": {\n \"MaxFileSizeMB\": 10,\n \"MaxFiles\": 10\n },\n \"Meta\": null,\n \"Name\": \"redis\",\n \"Resources\": {\n \"CPU\": 500,\n \"Devices\": null,\n \"DiskMB\": 0,\n \"IOPS\": 0,\n \"MemoryMB\": 256,\n \"Networks\": [\n {\n \"CIDR\": \"\",\n \"DNS\": null,\n \"Device\": \"\",\n \"DynamicPorts\": [\n {\n \"HostNetwork\": \"default\",\n \"Label\": \"db\",\n \"To\": 0,\n \"Value\": 0\n }\n ],\n \"IP\": \"\",\n \"MBits\": 10,\n \"Mode\": \"\",\n \"ReservedPorts\": null\n }\n ]\n },\n \"RestartPolicy\": {\n \"Attempts\": 3,\n \"Delay\": 15000000000,\n \"Interval\": 1800000000000,\n \"Mode\": \"fail\"\n },\n \"Services\": [\n {\n \"AddressMode\": \"auto\",\n \"CanaryMeta\": null,\n \"CanaryTags\": null,\n \"Checks\": [\n {\n \"AddressMode\": \"\",\n \"Args\": null,\n \"CheckRestart\": null,\n \"Command\": \"\",\n \"Expose\": false,\n \"FailuresBeforeCritical\": 0,\n \"GRPCService\": \"\",\n \"GRPCUseTLS\": false,\n \"Header\": null,\n \"InitialStatus\": \"\",\n \"Interval\": 10000000000,\n \"Method\": \"\",\n \"Name\": \"alive\",\n \"Path\": \"\",\n \"PortLabel\": \"\",\n \"Protocol\": \"\",\n \"SuccessBeforePassing\": 0,\n \"TLSSkipVerify\": false,\n \"TaskName\": \"\",\n \"Timeout\": 2000000000,\n \"Type\": \"tcp\"\n }\n ],\n \"Connect\": null,\n \"EnableTagOverride\": false,\n \"Meta\": null,\n \"Name\": \"redis-cache\",\n \"PortLabel\": \"db\",\n \"Tags\": [\n \"global\",\n \"cache\"\n ],\n \"TaskName\": \"\"\n }\n ],\n \"ShutdownDelay\": 0,\n \"Templates\": null,\n \"User\": \"\",\n \"Vault\": null,\n \"VolumeMounts\": null\n }\n ],\n \"Update\": {\n \"AutoPromote\": false,\n \"AutoRevert\": false,\n \"Canary\": 0,\n \"HealthCheck\": \"checks\",\n \"HealthyDeadline\": 180000000000,\n \"MaxParallel\": 1,\n \"MinHealthyTime\": 10000000000,\n \"ProgressDeadline\": 600000000000,\n \"Stagger\": 30000000000\n },\n \"Volumes\": null\n }\n ],\n \"Type\": \"service\",\n \"Update\": {\n \"AutoPromote\": false,\n \"AutoRevert\": false,\n \"Canary\": 0,\n \"HealthCheck\": \"\",\n \"HealthyDeadline\": 0,\n \"MaxParallel\": 1,\n \"MinHealthyTime\": 0,\n \"ProgressDeadline\": 0,\n \"Stagger\": 30000000000\n },\n \"VaultNamespace\": \"\",\n \"VaultToken\": \"\",\n \"Version\": 0\n }\n ]\n\n'''\n\n\nimport os\nimport json\n\nfrom ansible.module_utils.basic import AnsibleModule, missing_required_lib\nfrom ansible.module_utils._text import to_native\n\nimport_nomad = None\ntry:\n import nomad\n import_nomad = True\nexcept ImportError:\n import_nomad = 
False\n\n\ndef run():\n module = AnsibleModule(\n argument_spec=dict(\n host=dict(required=True, type='str'),\n use_ssl=dict(type='bool', default=True),\n timeout=dict(type='int', default=5),\n validate_certs=dict(type='bool', default=True),\n client_cert=dict(type='path', default=None),\n client_key=dict(type='path', default=None),\n namespace=dict(type='str', default=None),\n name=dict(type='str', default=None),\n token=dict(type='str', default=None, no_log=True)\n ),\n supports_check_mode=True\n )\n\n if not import_nomad:\n module.fail_json(msg=missing_required_lib(\"python-nomad\"))\n\n certificate_ssl = (module.params.get('client_cert'), module.params.get('client_key'))\n\n nomad_client = nomad.Nomad(\n host=module.params.get('host'),\n secure=module.params.get('use_ssl'),\n timeout=module.params.get('timeout'),\n verify=module.params.get('validate_certs'),\n cert=certificate_ssl,\n namespace=module.params.get('namespace'),\n token=module.params.get('token')\n )\n\n changed = False\n result = list()\n try:\n job_list = nomad_client.jobs.get_jobs()\n for job in job_list:\n result.append(nomad_client.job.get_job(job.get('ID')))\n except Exception as e:\n module.fail_json(msg=to_native(e))\n\n if module.params.get('name'):\n filter = list()\n try:\n for job in result:\n if job.get('ID') == module.params.get('name'):\n filter.append(job)\n result = filter\n if not filter:\n module.fail_json(msg=\"Couldn't find Job with id \" + str(module.params.get('name')))\n except Exception as e:\n module.fail_json(msg=to_native(e))\n\n module.exit_json(changed=changed, result=result)\n\n\ndef main():\n\n run()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "plugins/modules/clustering/nomad/nomad_job_info.py"}]} | 3,979 | 161 |
gh_patches_debug_29707 | rasdani/github-patches | git_diff | e-valuation__EvaP-2040 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Replace `toggle_no_grades` with an idempotent alternative
Currently, there is a URL for toggling grade status of a course (`grades:toggle_no_grades`). Instead of having a toggle, we would like an idempotent operation, that is, the endpoint should be something like `grades:set_gets_grade_documents` where the desired boolean value is sent with the request. This way, two people who want to change the status don't revert each others action but instead set the same value.
</issue>
<code>
[start of evap/grades/urls.py]
1 from django.urls import path
2
3 from evap.grades import views
4
5 app_name = "grades"
6
7 urlpatterns = [
8 path("", views.IndexView.as_view(), name="index"),
9 path("download/<int:grade_document_id>", views.download_grades, name="download_grades"),
10 path("semester/<int:semester_id>", views.SemesterView.as_view(), name="semester_view"),
11 path("course/<int:course_id>", views.CourseView.as_view(), name="course_view"),
12 path("course/<int:course_id>/upload", views.upload_grades, name="upload_grades"),
13 path("grade_document/<int:grade_document_id>/edit", views.edit_grades, name="edit_grades"),
14
15 path("delete_grades", views.delete_grades, name="delete_grades"),
16 path("toggle_no_grades", views.toggle_no_grades, name="toggle_no_grades"),
17 ]
18
[end of evap/grades/urls.py]
[start of evap/grades/views.py]
1 from django.conf import settings
2 from django.contrib import messages
3 from django.core.exceptions import PermissionDenied
4 from django.db.models.query import QuerySet
5 from django.http import FileResponse, HttpResponse
6 from django.shortcuts import get_object_or_404, redirect, render
7 from django.utils.translation import gettext as _
8 from django.views.decorators.http import require_GET, require_POST
9 from django.views.generic import DetailView, TemplateView
10
11 from evap.evaluation.auth import (
12 grade_downloader_required,
13 grade_publisher_or_manager_required,
14 grade_publisher_required,
15 )
16 from evap.evaluation.models import Course, EmailTemplate, Evaluation, Semester
17 from evap.evaluation.tools import get_object_from_dict_pk_entry_or_logged_40x, ilen
18 from evap.grades.forms import GradeDocumentForm
19 from evap.grades.models import GradeDocument
20
21
22 @grade_publisher_required
23 class IndexView(TemplateView):
24 template_name = "grades_index.html"
25
26 def get_context_data(self, **kwargs):
27 return super().get_context_data(**kwargs) | {
28 "semesters": Semester.objects.filter(grade_documents_are_deleted=False),
29 "disable_breadcrumb_grades": True,
30 }
31
32
33 def course_grade_document_count_tuples(courses: QuerySet[Course]) -> list[tuple[Course, int, int]]:
34 courses = courses.prefetch_related("degrees", "responsibles", "evaluations", "grade_documents")
35
36 return [
37 (
38 course,
39 ilen(gd for gd in course.grade_documents.all() if gd.type == GradeDocument.Type.MIDTERM_GRADES),
40 ilen(gd for gd in course.grade_documents.all() if gd.type == GradeDocument.Type.FINAL_GRADES),
41 )
42 for course in courses
43 ]
44
45
46 @grade_publisher_required
47 class SemesterView(DetailView):
48 template_name = "grades_semester_view.html"
49 model = Semester
50 pk_url_kwarg = "semester_id"
51
52 object: Semester
53
54 def get_object(self, *args, **kwargs):
55 semester = super().get_object(*args, **kwargs)
56 if semester.grade_documents_are_deleted:
57 raise PermissionDenied
58 return semester
59
60 def get_context_data(self, **kwargs):
61 courses = (
62 self.object.courses.filter(evaluations__wait_for_grade_upload_before_publishing=True)
63 .exclude(evaluations__state=Evaluation.State.NEW)
64 .distinct()
65 )
66 courses = course_grade_document_count_tuples(courses)
67
68 return super().get_context_data(**kwargs) | {
69 "courses": courses,
70 "disable_breadcrumb_semester": True,
71 }
72
73
74 @grade_publisher_or_manager_required
75 class CourseView(DetailView):
76 template_name = "grades_course_view.html"
77 model = Course
78 pk_url_kwarg = "course_id"
79
80 def get_object(self, *args, **kwargs):
81 course = super().get_object(*args, **kwargs)
82 if course.semester.grade_documents_are_deleted:
83 raise PermissionDenied
84 return course
85
86 def get_context_data(self, **kwargs):
87 return super().get_context_data(**kwargs) | {
88 "semester": self.object.semester,
89 "grade_documents": self.object.grade_documents.all(),
90 "disable_breadcrumb_course": True,
91 }
92
93
94 def on_grading_process_finished(course):
95 evaluations = course.evaluations.all()
96 if all(evaluation.state == Evaluation.State.REVIEWED for evaluation in evaluations):
97 for evaluation in evaluations:
98 assert evaluation.grading_process_is_finished
99 for evaluation in evaluations:
100 evaluation.publish()
101 evaluation.save()
102
103 EmailTemplate.send_participant_publish_notifications(evaluations)
104 EmailTemplate.send_contributor_publish_notifications(evaluations)
105
106
107 @grade_publisher_required
108 def upload_grades(request, course_id):
109 course = get_object_or_404(Course, id=course_id)
110 semester = course.semester
111 if semester.grade_documents_are_deleted:
112 raise PermissionDenied
113
114 final_grades = request.GET.get("final") == "true" # if parameter is not given, assume midterm grades
115
116 grade_document = GradeDocument(course=course)
117 if final_grades:
118 grade_document.type = GradeDocument.Type.FINAL_GRADES
119 grade_document.description_en = settings.DEFAULT_FINAL_GRADES_DESCRIPTION_EN
120 grade_document.description_de = settings.DEFAULT_FINAL_GRADES_DESCRIPTION_DE
121 else:
122 grade_document.type = GradeDocument.Type.MIDTERM_GRADES
123 grade_document.description_en = settings.DEFAULT_MIDTERM_GRADES_DESCRIPTION_EN
124 grade_document.description_de = settings.DEFAULT_MIDTERM_GRADES_DESCRIPTION_DE
125
126 form = GradeDocumentForm(request.POST or None, request.FILES or None, instance=grade_document)
127
128 if form.is_valid():
129 form.save(modifying_user=request.user)
130
131 if final_grades:
132 on_grading_process_finished(course)
133
134 messages.success(request, _("Successfully uploaded grades."))
135 return redirect("grades:course_view", course.id)
136
137 template_data = {
138 "semester": semester,
139 "course": course,
140 "form": form,
141 "final_grades": final_grades,
142 "show_automated_publishing_info": final_grades,
143 }
144 return render(request, "grades_upload_form.html", template_data)
145
146
147 @require_POST
148 @grade_publisher_required
149 def toggle_no_grades(request):
150 course = get_object_from_dict_pk_entry_or_logged_40x(Course, request.POST, "course_id")
151 if course.semester.grade_documents_are_deleted:
152 raise PermissionDenied
153
154 course.gets_no_grade_documents = not course.gets_no_grade_documents
155 course.save()
156
157 if course.gets_no_grade_documents:
158 on_grading_process_finished(course)
159
160 return HttpResponse() # 200 OK
161
162
163 @require_GET
164 @grade_downloader_required
165 def download_grades(request, grade_document_id):
166 grade_document = get_object_or_404(GradeDocument, id=grade_document_id)
167 if grade_document.course.semester.grade_documents_are_deleted:
168 raise PermissionDenied
169
170 return FileResponse(grade_document.file.open(), filename=grade_document.filename(), as_attachment=True)
171
172
173 @grade_publisher_required
174 def edit_grades(request, grade_document_id):
175 grade_document = get_object_or_404(GradeDocument, id=grade_document_id)
176 course = grade_document.course
177 semester = course.semester
178 if semester.grade_documents_are_deleted:
179 raise PermissionDenied
180
181 form = GradeDocumentForm(request.POST or None, request.FILES or None, instance=grade_document)
182
183 final_grades = (
184 grade_document.type == GradeDocument.Type.FINAL_GRADES
185 ) # if parameter is not given, assume midterm grades
186
187 if form.is_valid():
188 form.save(modifying_user=request.user)
189 messages.success(request, _("Successfully updated grades."))
190 return redirect("grades:course_view", course.id)
191
192 template_data = {
193 "semester": semester,
194 "course": course,
195 "form": form,
196 "show_automated_publishing_info": False,
197 "final_grades": final_grades,
198 }
199 return render(request, "grades_upload_form.html", template_data)
200
201
202 @require_POST
203 @grade_publisher_required
204 def delete_grades(request):
205 grade_document = get_object_from_dict_pk_entry_or_logged_40x(GradeDocument, request.POST, "grade_document_id")
206 grade_document.delete()
207 return HttpResponse() # 200 OK
208
[end of evap/grades/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/evap/grades/urls.py b/evap/grades/urls.py
--- a/evap/grades/urls.py
+++ b/evap/grades/urls.py
@@ -13,5 +13,5 @@
path("grade_document/<int:grade_document_id>/edit", views.edit_grades, name="edit_grades"),
path("delete_grades", views.delete_grades, name="delete_grades"),
- path("toggle_no_grades", views.toggle_no_grades, name="toggle_no_grades"),
+ path("set_no_grades", views.set_no_grades, name="set_no_grades"),
]
diff --git a/evap/grades/views.py b/evap/grades/views.py
--- a/evap/grades/views.py
+++ b/evap/grades/views.py
@@ -1,6 +1,6 @@
from django.conf import settings
from django.contrib import messages
-from django.core.exceptions import PermissionDenied
+from django.core.exceptions import PermissionDenied, SuspiciousOperation
from django.db.models.query import QuerySet
from django.http import FileResponse, HttpResponse
from django.shortcuts import get_object_or_404, redirect, render
@@ -146,12 +146,18 @@
@require_POST
@grade_publisher_required
-def toggle_no_grades(request):
+def set_no_grades(request):
course = get_object_from_dict_pk_entry_or_logged_40x(Course, request.POST, "course_id")
+
+ try:
+ status = bool(int(request.POST["status"]))
+ except (KeyError, TypeError, ValueError) as e:
+ raise SuspiciousOperation from e
+
if course.semester.grade_documents_are_deleted:
raise PermissionDenied
- course.gets_no_grade_documents = not course.gets_no_grade_documents
+ course.gets_no_grade_documents = status
course.save()
if course.gets_no_grade_documents:
| {"golden_diff": "diff --git a/evap/grades/urls.py b/evap/grades/urls.py\n--- a/evap/grades/urls.py\n+++ b/evap/grades/urls.py\n@@ -13,5 +13,5 @@\n path(\"grade_document/<int:grade_document_id>/edit\", views.edit_grades, name=\"edit_grades\"),\n \n path(\"delete_grades\", views.delete_grades, name=\"delete_grades\"),\n- path(\"toggle_no_grades\", views.toggle_no_grades, name=\"toggle_no_grades\"),\n+ path(\"set_no_grades\", views.set_no_grades, name=\"set_no_grades\"),\n ]\ndiff --git a/evap/grades/views.py b/evap/grades/views.py\n--- a/evap/grades/views.py\n+++ b/evap/grades/views.py\n@@ -1,6 +1,6 @@\n from django.conf import settings\n from django.contrib import messages\n-from django.core.exceptions import PermissionDenied\n+from django.core.exceptions import PermissionDenied, SuspiciousOperation\n from django.db.models.query import QuerySet\n from django.http import FileResponse, HttpResponse\n from django.shortcuts import get_object_or_404, redirect, render\n@@ -146,12 +146,18 @@\n \n @require_POST\n @grade_publisher_required\n-def toggle_no_grades(request):\n+def set_no_grades(request):\n course = get_object_from_dict_pk_entry_or_logged_40x(Course, request.POST, \"course_id\")\n+\n+ try:\n+ status = bool(int(request.POST[\"status\"]))\n+ except (KeyError, TypeError, ValueError) as e:\n+ raise SuspiciousOperation from e\n+\n if course.semester.grade_documents_are_deleted:\n raise PermissionDenied\n \n- course.gets_no_grade_documents = not course.gets_no_grade_documents\n+ course.gets_no_grade_documents = status\n course.save()\n \n if course.gets_no_grade_documents:\n", "issue": "Replace `toggle_no_grades` with an idempotent alternative\nCurrently, there is a URL for toggling grade status of a course (`grades:toggle_no_grades`). Instead of having a toggle, we would like an idempotent operation, that is, the endpoint should be something like `grades:set_gets_grade_documents` where the desired boolean value is sent with the request. 
This way, two people who want to change the status don't revert each others action but instead set the same value.\n", "before_files": [{"content": "from django.urls import path\n\nfrom evap.grades import views\n\napp_name = \"grades\"\n\nurlpatterns = [\n path(\"\", views.IndexView.as_view(), name=\"index\"),\n path(\"download/<int:grade_document_id>\", views.download_grades, name=\"download_grades\"),\n path(\"semester/<int:semester_id>\", views.SemesterView.as_view(), name=\"semester_view\"),\n path(\"course/<int:course_id>\", views.CourseView.as_view(), name=\"course_view\"),\n path(\"course/<int:course_id>/upload\", views.upload_grades, name=\"upload_grades\"),\n path(\"grade_document/<int:grade_document_id>/edit\", views.edit_grades, name=\"edit_grades\"),\n\n path(\"delete_grades\", views.delete_grades, name=\"delete_grades\"),\n path(\"toggle_no_grades\", views.toggle_no_grades, name=\"toggle_no_grades\"),\n]\n", "path": "evap/grades/urls.py"}, {"content": "from django.conf import settings\nfrom django.contrib import messages\nfrom django.core.exceptions import PermissionDenied\nfrom django.db.models.query import QuerySet\nfrom django.http import FileResponse, HttpResponse\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils.translation import gettext as _\nfrom django.views.decorators.http import require_GET, require_POST\nfrom django.views.generic import DetailView, TemplateView\n\nfrom evap.evaluation.auth import (\n grade_downloader_required,\n grade_publisher_or_manager_required,\n grade_publisher_required,\n)\nfrom evap.evaluation.models import Course, EmailTemplate, Evaluation, Semester\nfrom evap.evaluation.tools import get_object_from_dict_pk_entry_or_logged_40x, ilen\nfrom evap.grades.forms import GradeDocumentForm\nfrom evap.grades.models import GradeDocument\n\n\n@grade_publisher_required\nclass IndexView(TemplateView):\n template_name = \"grades_index.html\"\n\n def get_context_data(self, **kwargs):\n return super().get_context_data(**kwargs) | {\n \"semesters\": Semester.objects.filter(grade_documents_are_deleted=False),\n \"disable_breadcrumb_grades\": True,\n }\n\n\ndef course_grade_document_count_tuples(courses: QuerySet[Course]) -> list[tuple[Course, int, int]]:\n courses = courses.prefetch_related(\"degrees\", \"responsibles\", \"evaluations\", \"grade_documents\")\n\n return [\n (\n course,\n ilen(gd for gd in course.grade_documents.all() if gd.type == GradeDocument.Type.MIDTERM_GRADES),\n ilen(gd for gd in course.grade_documents.all() if gd.type == GradeDocument.Type.FINAL_GRADES),\n )\n for course in courses\n ]\n\n\n@grade_publisher_required\nclass SemesterView(DetailView):\n template_name = \"grades_semester_view.html\"\n model = Semester\n pk_url_kwarg = \"semester_id\"\n\n object: Semester\n\n def get_object(self, *args, **kwargs):\n semester = super().get_object(*args, **kwargs)\n if semester.grade_documents_are_deleted:\n raise PermissionDenied\n return semester\n\n def get_context_data(self, **kwargs):\n courses = (\n self.object.courses.filter(evaluations__wait_for_grade_upload_before_publishing=True)\n .exclude(evaluations__state=Evaluation.State.NEW)\n .distinct()\n )\n courses = course_grade_document_count_tuples(courses)\n\n return super().get_context_data(**kwargs) | {\n \"courses\": courses,\n \"disable_breadcrumb_semester\": True,\n }\n\n\n@grade_publisher_or_manager_required\nclass CourseView(DetailView):\n template_name = \"grades_course_view.html\"\n model = Course\n pk_url_kwarg = \"course_id\"\n\n def 
get_object(self, *args, **kwargs):\n course = super().get_object(*args, **kwargs)\n if course.semester.grade_documents_are_deleted:\n raise PermissionDenied\n return course\n\n def get_context_data(self, **kwargs):\n return super().get_context_data(**kwargs) | {\n \"semester\": self.object.semester,\n \"grade_documents\": self.object.grade_documents.all(),\n \"disable_breadcrumb_course\": True,\n }\n\n\ndef on_grading_process_finished(course):\n evaluations = course.evaluations.all()\n if all(evaluation.state == Evaluation.State.REVIEWED for evaluation in evaluations):\n for evaluation in evaluations:\n assert evaluation.grading_process_is_finished\n for evaluation in evaluations:\n evaluation.publish()\n evaluation.save()\n\n EmailTemplate.send_participant_publish_notifications(evaluations)\n EmailTemplate.send_contributor_publish_notifications(evaluations)\n\n\n@grade_publisher_required\ndef upload_grades(request, course_id):\n course = get_object_or_404(Course, id=course_id)\n semester = course.semester\n if semester.grade_documents_are_deleted:\n raise PermissionDenied\n\n final_grades = request.GET.get(\"final\") == \"true\" # if parameter is not given, assume midterm grades\n\n grade_document = GradeDocument(course=course)\n if final_grades:\n grade_document.type = GradeDocument.Type.FINAL_GRADES\n grade_document.description_en = settings.DEFAULT_FINAL_GRADES_DESCRIPTION_EN\n grade_document.description_de = settings.DEFAULT_FINAL_GRADES_DESCRIPTION_DE\n else:\n grade_document.type = GradeDocument.Type.MIDTERM_GRADES\n grade_document.description_en = settings.DEFAULT_MIDTERM_GRADES_DESCRIPTION_EN\n grade_document.description_de = settings.DEFAULT_MIDTERM_GRADES_DESCRIPTION_DE\n\n form = GradeDocumentForm(request.POST or None, request.FILES or None, instance=grade_document)\n\n if form.is_valid():\n form.save(modifying_user=request.user)\n\n if final_grades:\n on_grading_process_finished(course)\n\n messages.success(request, _(\"Successfully uploaded grades.\"))\n return redirect(\"grades:course_view\", course.id)\n\n template_data = {\n \"semester\": semester,\n \"course\": course,\n \"form\": form,\n \"final_grades\": final_grades,\n \"show_automated_publishing_info\": final_grades,\n }\n return render(request, \"grades_upload_form.html\", template_data)\n\n\n@require_POST\n@grade_publisher_required\ndef toggle_no_grades(request):\n course = get_object_from_dict_pk_entry_or_logged_40x(Course, request.POST, \"course_id\")\n if course.semester.grade_documents_are_deleted:\n raise PermissionDenied\n\n course.gets_no_grade_documents = not course.gets_no_grade_documents\n course.save()\n\n if course.gets_no_grade_documents:\n on_grading_process_finished(course)\n\n return HttpResponse() # 200 OK\n\n\n@require_GET\n@grade_downloader_required\ndef download_grades(request, grade_document_id):\n grade_document = get_object_or_404(GradeDocument, id=grade_document_id)\n if grade_document.course.semester.grade_documents_are_deleted:\n raise PermissionDenied\n\n return FileResponse(grade_document.file.open(), filename=grade_document.filename(), as_attachment=True)\n\n\n@grade_publisher_required\ndef edit_grades(request, grade_document_id):\n grade_document = get_object_or_404(GradeDocument, id=grade_document_id)\n course = grade_document.course\n semester = course.semester\n if semester.grade_documents_are_deleted:\n raise PermissionDenied\n\n form = GradeDocumentForm(request.POST or None, request.FILES or None, instance=grade_document)\n\n final_grades = (\n grade_document.type == 
GradeDocument.Type.FINAL_GRADES\n ) # if parameter is not given, assume midterm grades\n\n if form.is_valid():\n form.save(modifying_user=request.user)\n messages.success(request, _(\"Successfully updated grades.\"))\n return redirect(\"grades:course_view\", course.id)\n\n template_data = {\n \"semester\": semester,\n \"course\": course,\n \"form\": form,\n \"show_automated_publishing_info\": False,\n \"final_grades\": final_grades,\n }\n return render(request, \"grades_upload_form.html\", template_data)\n\n\n@require_POST\n@grade_publisher_required\ndef delete_grades(request):\n grade_document = get_object_from_dict_pk_entry_or_logged_40x(GradeDocument, request.POST, \"grade_document_id\")\n grade_document.delete()\n return HttpResponse() # 200 OK\n", "path": "evap/grades/views.py"}]} | 2,982 | 425 |
gh_patches_debug_37255 | rasdani/github-patches | git_diff | zulip__zulip-14742 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
upgrade-zulip-from-git: Provide useful error messages when `update-prod-static` fails
In https://github.com/zulip/zulip/issues/14615, we have `upgrade-zulip-from-git` failing inside `update-prod-static`, and the error message is terrible:
```
2020-04-17 21:23:41,590 upgrade-zulip-stage-2: Building static assets...
Cached version not found! Installing node modules.
+ /srv/zulip-yarn/bin/yarn install --non-interactive --frozen-lockfile --prod
Error running a subcommand of ./tools/update-prod-static: /srv/zulip-yarn/bin/yarn install --non-interactive --frozen-lockfile --prod
Actual error output for the subcommand is just above this.
Traceback (most recent call last):
File "./tools/update-prod-static", line 37, in <module>
setup_node_modules(production=True, stdout=fp, stderr=fp)
File "./tools/../scripts/lib/node_cache.py", line 66, in setup_node_modules
stderr=stderr)
File "./tools/../scripts/lib/node_cache.py", line 91, in do_yarn_install
cwd=target_path, stdout=stdout, stderr=stderr)
File "./tools/../scripts/lib/zulip_tools.py", line 200, in run
subprocess.check_call(args, **kwargs)
File "/usr/lib/python3.5/subprocess.py", line 581, in check_call
raise CalledProcessError(retcode, cmd)
subprocess.CalledProcessError: Command '['/srv/zulip-yarn/bin/yarn', 'install', '--non-interactive', '--frozen-lockfile', '--prod']' returned non-zero exit status 1
Traceback (most recent call last):
File "/home/zulip/deployments/2020-04-18-01-53-13/scripts/lib/upgrade-zulip-stage-2", line 122, in <module>
preexec_fn=su_to_zulip)
File "/usr/lib/python3.5/subprocess.py", line 581, in check_call
raise CalledProcessError(retcode, cmd)
subprocess.CalledProcessError: Command '['./tools/update-prod-static', '--authors-not-required', '--prev-deploy', '/home/zulip/deployments/current']' returned non-zero exit status 1
Traceback (most recent call last):
File "/home/zulip/deployments/current/scripts/lib/upgrade-zulip-from-git", line 69, in <module>
deploy_path, "--from-git"] + deploy_options)
File "/usr/lib/python3.5/subprocess.py", line 581, in check_call
raise CalledProcessError(retcode, cmd)
subprocess.CalledProcessError: Command '['/home/zulip/deployments/2020-04-18-01-53-13/scripts/lib/upgrade-zulip-stage-2', '/home/zulip/deployments/2020-04-18-01-53-13', '--from-git']' returned non-zero exit status 1
```
Because `update-prod-static` is failing, the actual error messages are in `/home/zulip/deployments/next/var/log/upgrade-prod-static.log`.
`update-prod-static` itself has very verbose output, so I think there are two paths forward:
* Simplest is to copy what `build-release-tarball` does and at least print the path to `upgrade-prod-static.log`, perhaps also catching and not emitting much of that unhelpful traceback.
* Better would be to make `update-prod-static` have more sane output.
upgrade-zulip-from-git: Provide useful error messages when `update-prod-static` fails
This just prints the path to the prod static log file.
</issue>
<code>
[start of scripts/lib/node_cache.py]
1 import os
2 import hashlib
3 import json
4 import shutil
5
6 from typing import Optional, List, IO, Any
7 from scripts.lib.zulip_tools import subprocess_text_output, run
8
9 ZULIP_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
10 ZULIP_SRV_PATH = "/srv"
11
12 if 'TRAVIS' in os.environ:
13 # In Travis CI, we don't have root access
14 ZULIP_SRV_PATH = "/home/travis"
15
16
17 NODE_MODULES_CACHE_PATH = os.path.join(ZULIP_SRV_PATH, 'zulip-npm-cache')
18 YARN_BIN = os.path.join(ZULIP_SRV_PATH, 'zulip-yarn/bin/yarn')
19 YARN_PACKAGE_JSON = os.path.join(ZULIP_SRV_PATH, 'zulip-yarn/package.json')
20
21 DEFAULT_PRODUCTION = False
22
23 def get_yarn_args(production: bool) -> List[str]:
24 if production:
25 yarn_args = ["--prod"]
26 else:
27 yarn_args = []
28 return yarn_args
29
30 def generate_sha1sum_node_modules(
31 setup_dir: Optional[str] = None, production: bool = DEFAULT_PRODUCTION
32 ) -> str:
33 if setup_dir is None:
34 setup_dir = os.path.realpath(os.getcwd())
35 PACKAGE_JSON_FILE_PATH = os.path.join(setup_dir, 'package.json')
36 YARN_LOCK_FILE_PATH = os.path.join(setup_dir, 'yarn.lock')
37 sha1sum = hashlib.sha1()
38 sha1sum.update(subprocess_text_output(['cat', PACKAGE_JSON_FILE_PATH]).encode('utf8'))
39 if os.path.exists(YARN_LOCK_FILE_PATH):
40 # For backwards compatibility, we can't assume yarn.lock exists
41 sha1sum.update(subprocess_text_output(['cat', YARN_LOCK_FILE_PATH]).encode('utf8'))
42 with open(YARN_PACKAGE_JSON) as f:
43 yarn_version = json.load(f)['version']
44 sha1sum.update(yarn_version.encode("utf8"))
45 sha1sum.update(subprocess_text_output(['node', '--version']).encode('utf8'))
46 yarn_args = get_yarn_args(production=production)
47 sha1sum.update(''.join(sorted(yarn_args)).encode('utf8'))
48 return sha1sum.hexdigest()
49
50 def setup_node_modules(
51 production: bool = DEFAULT_PRODUCTION,
52 stdout: Optional[IO[Any]] = None,
53 stderr: Optional[IO[Any]] = None,
54 prefer_offline: bool = False,
55 ) -> None:
56 yarn_args = get_yarn_args(production=production)
57 if prefer_offline:
58 yarn_args.append("--prefer-offline")
59 sha1sum = generate_sha1sum_node_modules(production=production)
60 target_path = os.path.join(NODE_MODULES_CACHE_PATH, sha1sum)
61 cached_node_modules = os.path.join(target_path, 'node_modules')
62 success_stamp = os.path.join(target_path, '.success-stamp')
63 # Check if a cached version already exists
64 if not os.path.exists(success_stamp):
65 do_yarn_install(target_path,
66 yarn_args,
67 success_stamp,
68 stdout=stdout,
69 stderr=stderr)
70
71 print("Using cached node modules from %s" % (cached_node_modules,))
72 if os.path.islink('node_modules'):
73 os.remove('node_modules')
74 elif os.path.isdir('node_modules'):
75 shutil.rmtree('node_modules')
76 os.symlink(cached_node_modules, 'node_modules')
77
78 def do_yarn_install(
79 target_path: str,
80 yarn_args: List[str],
81 success_stamp: str,
82 stdout: Optional[IO[Any]] = None,
83 stderr: Optional[IO[Any]] = None,
84 ) -> None:
85 os.makedirs(target_path, exist_ok=True)
86 shutil.copy('package.json', target_path)
87 shutil.copy("yarn.lock", target_path)
88 shutil.copy(".yarnrc", target_path)
89 cached_node_modules = os.path.join(target_path, 'node_modules')
90 print("Cached version not found! Installing node modules.")
91
92 # Copy the existing node_modules to speed up install
93 if os.path.exists("node_modules") and not os.path.exists(cached_node_modules):
94 shutil.copytree("node_modules/", cached_node_modules, symlinks=True)
95 if os.environ.get('CUSTOM_CA_CERTIFICATES'):
96 run([YARN_BIN, "config", "set", "cafile", os.environ['CUSTOM_CA_CERTIFICATES']],
97 stdout=stdout, stderr=stderr)
98 run([YARN_BIN, "install", "--non-interactive", "--frozen-lockfile"] + yarn_args,
99 cwd=target_path, stdout=stdout, stderr=stderr)
100 with open(success_stamp, 'w'):
101 pass
102
[end of scripts/lib/node_cache.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scripts/lib/node_cache.py b/scripts/lib/node_cache.py
--- a/scripts/lib/node_cache.py
+++ b/scripts/lib/node_cache.py
@@ -3,7 +3,7 @@
import json
import shutil
-from typing import Optional, List, IO, Any
+from typing import Optional, List
from scripts.lib.zulip_tools import subprocess_text_output, run
ZULIP_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
@@ -49,8 +49,6 @@
def setup_node_modules(
production: bool = DEFAULT_PRODUCTION,
- stdout: Optional[IO[Any]] = None,
- stderr: Optional[IO[Any]] = None,
prefer_offline: bool = False,
) -> None:
yarn_args = get_yarn_args(production=production)
@@ -64,9 +62,7 @@
if not os.path.exists(success_stamp):
do_yarn_install(target_path,
yarn_args,
- success_stamp,
- stdout=stdout,
- stderr=stderr)
+ success_stamp)
print("Using cached node modules from %s" % (cached_node_modules,))
if os.path.islink('node_modules'):
@@ -78,9 +74,7 @@
def do_yarn_install(
target_path: str,
yarn_args: List[str],
- success_stamp: str,
- stdout: Optional[IO[Any]] = None,
- stderr: Optional[IO[Any]] = None,
+ success_stamp: str
) -> None:
os.makedirs(target_path, exist_ok=True)
shutil.copy('package.json', target_path)
@@ -93,9 +87,8 @@
if os.path.exists("node_modules") and not os.path.exists(cached_node_modules):
shutil.copytree("node_modules/", cached_node_modules, symlinks=True)
if os.environ.get('CUSTOM_CA_CERTIFICATES'):
- run([YARN_BIN, "config", "set", "cafile", os.environ['CUSTOM_CA_CERTIFICATES']],
- stdout=stdout, stderr=stderr)
+ run([YARN_BIN, "config", "set", "cafile", os.environ['CUSTOM_CA_CERTIFICATES']])
run([YARN_BIN, "install", "--non-interactive", "--frozen-lockfile"] + yarn_args,
- cwd=target_path, stdout=stdout, stderr=stderr)
+ cwd=target_path)
with open(success_stamp, 'w'):
pass
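For orientation, the net effect of the change above is that `setup_node_modules()` and `do_yarn_install()` no longer accept or forward `stdout`/`stderr` handles, so the yarn subprocess simply inherits the caller's streams. A caller-side sketch (the import path and call are assumptions for illustration; `tools/update-prod-static` itself is not shown here):

```python
# Hypothetical caller after this change: there are no stdout/stderr handles to
# pass, so any failure from `yarn install` is printed wherever the caller's own
# output goes instead of being captured inside node_cache.
from scripts.lib.node_cache import setup_node_modules

setup_node_modules(production=True)
```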
| {"golden_diff": "diff --git a/scripts/lib/node_cache.py b/scripts/lib/node_cache.py\n--- a/scripts/lib/node_cache.py\n+++ b/scripts/lib/node_cache.py\n@@ -3,7 +3,7 @@\n import json\n import shutil\n \n-from typing import Optional, List, IO, Any\n+from typing import Optional, List\n from scripts.lib.zulip_tools import subprocess_text_output, run\n \n ZULIP_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n@@ -49,8 +49,6 @@\n \n def setup_node_modules(\n production: bool = DEFAULT_PRODUCTION,\n- stdout: Optional[IO[Any]] = None,\n- stderr: Optional[IO[Any]] = None,\n prefer_offline: bool = False,\n ) -> None:\n yarn_args = get_yarn_args(production=production)\n@@ -64,9 +62,7 @@\n if not os.path.exists(success_stamp):\n do_yarn_install(target_path,\n yarn_args,\n- success_stamp,\n- stdout=stdout,\n- stderr=stderr)\n+ success_stamp)\n \n print(\"Using cached node modules from %s\" % (cached_node_modules,))\n if os.path.islink('node_modules'):\n@@ -78,9 +74,7 @@\n def do_yarn_install(\n target_path: str,\n yarn_args: List[str],\n- success_stamp: str,\n- stdout: Optional[IO[Any]] = None,\n- stderr: Optional[IO[Any]] = None,\n+ success_stamp: str\n ) -> None:\n os.makedirs(target_path, exist_ok=True)\n shutil.copy('package.json', target_path)\n@@ -93,9 +87,8 @@\n if os.path.exists(\"node_modules\") and not os.path.exists(cached_node_modules):\n shutil.copytree(\"node_modules/\", cached_node_modules, symlinks=True)\n if os.environ.get('CUSTOM_CA_CERTIFICATES'):\n- run([YARN_BIN, \"config\", \"set\", \"cafile\", os.environ['CUSTOM_CA_CERTIFICATES']],\n- stdout=stdout, stderr=stderr)\n+ run([YARN_BIN, \"config\", \"set\", \"cafile\", os.environ['CUSTOM_CA_CERTIFICATES']])\n run([YARN_BIN, \"install\", \"--non-interactive\", \"--frozen-lockfile\"] + yarn_args,\n- cwd=target_path, stdout=stdout, stderr=stderr)\n+ cwd=target_path)\n with open(success_stamp, 'w'):\n pass\n", "issue": "upgrade-zulip-from-git: Provide useful error messages when `update-prod-static` fails\nIn https://github.com/zulip/zulip/issues/14615, we have `upgrade-zulip-from-git` failing inside `update-prod-static`, and the error message is terrible:\r\n\r\n```\r\n2020-04-17 21:23:41,590 upgrade-zulip-stage-2: Building static assets...\r\nCached version not found! 
Installing node modules.\r\n+ /srv/zulip-yarn/bin/yarn install --non-interactive --frozen-lockfile --prod\r\n\r\nError running a subcommand of ./tools/update-prod-static: /srv/zulip-yarn/bin/yarn install --non-interactive --frozen-lockfile --prod\r\nActual error output for the subcommand is just above this.\r\n\r\nTraceback (most recent call last):\r\n File \"./tools/update-prod-static\", line 37, in <module>\r\n setup_node_modules(production=True, stdout=fp, stderr=fp)\r\n File \"./tools/../scripts/lib/node_cache.py\", line 66, in setup_node_modules\r\n stderr=stderr)\r\n File \"./tools/../scripts/lib/node_cache.py\", line 91, in do_yarn_install\r\n cwd=target_path, stdout=stdout, stderr=stderr)\r\n File \"./tools/../scripts/lib/zulip_tools.py\", line 200, in run\r\n subprocess.check_call(args, **kwargs)\r\n File \"/usr/lib/python3.5/subprocess.py\", line 581, in check_call\r\n raise CalledProcessError(retcode, cmd)\r\nsubprocess.CalledProcessError: Command '['/srv/zulip-yarn/bin/yarn', 'install', '--non-interactive', '--frozen-lockfile', '--prod']' returned non-zero exit status 1\r\nTraceback (most recent call last):\r\n File \"/home/zulip/deployments/2020-04-18-01-53-13/scripts/lib/upgrade-zulip-stage-2\", line 122, in <module>\r\n preexec_fn=su_to_zulip)\r\n File \"/usr/lib/python3.5/subprocess.py\", line 581, in check_call\r\n raise CalledProcessError(retcode, cmd)\r\nsubprocess.CalledProcessError: Command '['./tools/update-prod-static', '--authors-not-required', '--prev-deploy', '/home/zulip/deployments/current']' returned non-zero exit status 1\r\nTraceback (most recent call last):\r\n File \"/home/zulip/deployments/current/scripts/lib/upgrade-zulip-from-git\", line 69, in <module>\r\n deploy_path, \"--from-git\"] + deploy_options)\r\n File \"/usr/lib/python3.5/subprocess.py\", line 581, in check_call\r\n raise CalledProcessError(retcode, cmd)\r\nsubprocess.CalledProcessError: Command '['/home/zulip/deployments/2020-04-18-01-53-13/scripts/lib/upgrade-zulip-stage-2', '/home/zulip/deployments/2020-04-18-01-53-13', '--from-git']' returned non-zero exit status 1\r\n```\r\n\r\nBecause `update-prod-static` is failing, the actual error messages is in `/home/zulip/deployments/next/var/log/upgrade-prod-static.log`.\r\n\r\n`update-prod-static` itself has very verbose output, so I think there's two paths forward:\r\n* Simplest is to copy `build-release-tarball` in at least printing the path to `upgrade-prod-static.log`, and perhaps catching and not emitting much of that unhelpful traceback.\r\n* Better would be to make `update-prod-static` have more sane output.\nupgrade-zulip-from-git: Provide useful error messages when `update-prod-static` fails \nThis just prints the path to the prod static log file. 
\n", "before_files": [{"content": "import os\nimport hashlib\nimport json\nimport shutil\n\nfrom typing import Optional, List, IO, Any\nfrom scripts.lib.zulip_tools import subprocess_text_output, run\n\nZULIP_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nZULIP_SRV_PATH = \"/srv\"\n\nif 'TRAVIS' in os.environ:\n # In Travis CI, we don't have root access\n ZULIP_SRV_PATH = \"/home/travis\"\n\n\nNODE_MODULES_CACHE_PATH = os.path.join(ZULIP_SRV_PATH, 'zulip-npm-cache')\nYARN_BIN = os.path.join(ZULIP_SRV_PATH, 'zulip-yarn/bin/yarn')\nYARN_PACKAGE_JSON = os.path.join(ZULIP_SRV_PATH, 'zulip-yarn/package.json')\n\nDEFAULT_PRODUCTION = False\n\ndef get_yarn_args(production: bool) -> List[str]:\n if production:\n yarn_args = [\"--prod\"]\n else:\n yarn_args = []\n return yarn_args\n\ndef generate_sha1sum_node_modules(\n setup_dir: Optional[str] = None, production: bool = DEFAULT_PRODUCTION\n) -> str:\n if setup_dir is None:\n setup_dir = os.path.realpath(os.getcwd())\n PACKAGE_JSON_FILE_PATH = os.path.join(setup_dir, 'package.json')\n YARN_LOCK_FILE_PATH = os.path.join(setup_dir, 'yarn.lock')\n sha1sum = hashlib.sha1()\n sha1sum.update(subprocess_text_output(['cat', PACKAGE_JSON_FILE_PATH]).encode('utf8'))\n if os.path.exists(YARN_LOCK_FILE_PATH):\n # For backwards compatibility, we can't assume yarn.lock exists\n sha1sum.update(subprocess_text_output(['cat', YARN_LOCK_FILE_PATH]).encode('utf8'))\n with open(YARN_PACKAGE_JSON) as f:\n yarn_version = json.load(f)['version']\n sha1sum.update(yarn_version.encode(\"utf8\"))\n sha1sum.update(subprocess_text_output(['node', '--version']).encode('utf8'))\n yarn_args = get_yarn_args(production=production)\n sha1sum.update(''.join(sorted(yarn_args)).encode('utf8'))\n return sha1sum.hexdigest()\n\ndef setup_node_modules(\n production: bool = DEFAULT_PRODUCTION,\n stdout: Optional[IO[Any]] = None,\n stderr: Optional[IO[Any]] = None,\n prefer_offline: bool = False,\n) -> None:\n yarn_args = get_yarn_args(production=production)\n if prefer_offline:\n yarn_args.append(\"--prefer-offline\")\n sha1sum = generate_sha1sum_node_modules(production=production)\n target_path = os.path.join(NODE_MODULES_CACHE_PATH, sha1sum)\n cached_node_modules = os.path.join(target_path, 'node_modules')\n success_stamp = os.path.join(target_path, '.success-stamp')\n # Check if a cached version already exists\n if not os.path.exists(success_stamp):\n do_yarn_install(target_path,\n yarn_args,\n success_stamp,\n stdout=stdout,\n stderr=stderr)\n\n print(\"Using cached node modules from %s\" % (cached_node_modules,))\n if os.path.islink('node_modules'):\n os.remove('node_modules')\n elif os.path.isdir('node_modules'):\n shutil.rmtree('node_modules')\n os.symlink(cached_node_modules, 'node_modules')\n\ndef do_yarn_install(\n target_path: str,\n yarn_args: List[str],\n success_stamp: str,\n stdout: Optional[IO[Any]] = None,\n stderr: Optional[IO[Any]] = None,\n) -> None:\n os.makedirs(target_path, exist_ok=True)\n shutil.copy('package.json', target_path)\n shutil.copy(\"yarn.lock\", target_path)\n shutil.copy(\".yarnrc\", target_path)\n cached_node_modules = os.path.join(target_path, 'node_modules')\n print(\"Cached version not found! 
Installing node modules.\")\n\n # Copy the existing node_modules to speed up install\n if os.path.exists(\"node_modules\") and not os.path.exists(cached_node_modules):\n shutil.copytree(\"node_modules/\", cached_node_modules, symlinks=True)\n if os.environ.get('CUSTOM_CA_CERTIFICATES'):\n run([YARN_BIN, \"config\", \"set\", \"cafile\", os.environ['CUSTOM_CA_CERTIFICATES']],\n stdout=stdout, stderr=stderr)\n run([YARN_BIN, \"install\", \"--non-interactive\", \"--frozen-lockfile\"] + yarn_args,\n cwd=target_path, stdout=stdout, stderr=stderr)\n with open(success_stamp, 'w'):\n pass\n", "path": "scripts/lib/node_cache.py"}]} | 2,593 | 536 |
gh_patches_debug_15730 | rasdani/github-patches | git_diff | learningequality__kolibri-4679 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Some images are not found
### Observed behavior
In the current Kolibri develop branch, some images are not found after building the JS bundle.
I.e.,
`yarn devserver` works perfectly
`yarn build & yarn django-devserver` gives a not-found error for these URLs:
/user/kolibri-logo.svg
/user/background.jpg

It looks like a path issue in the frontend
### Expected behavior
All the images should be found when using the built bundles.
### User-facing consequences
Bad look
### Steps to reproduce
Using develop code
`yarn build` will build the bundles
`yarn django-devserver` will run only the Django webserver.
The problem will then be visible when browsing Kolibri.
### Context
* Kolibri develop branch
* Tested with Google Chrome
* OS: Linux
</issue>
<code>
[start of kolibri/core/templatetags/kolibri_tags.py]
1 """
2 Kolibri template tags
3 =====================
4 """
5 from __future__ import absolute_import
6 from __future__ import print_function
7 from __future__ import unicode_literals
8
9 import copy
10 import json
11 import re
12
13 import user_agents
14 from django import template
15 from django.conf import settings
16 from django.contrib.staticfiles.templatetags.staticfiles import static
17 from django.core.serializers.json import DjangoJSONEncoder
18 from django.core.urlresolvers import resolve
19 from django.core.urlresolvers import reverse
20 from django.utils.html import mark_safe
21 from django.utils.timezone import now
22 from django.utils.translation import get_language
23 from django.utils.translation import get_language_bidi
24 from django.utils.translation import get_language_info
25 from django_js_reverse.js_reverse_settings import JS_GLOBAL_OBJECT_NAME
26 from django_js_reverse.js_reverse_settings import JS_VAR_NAME
27 from django_js_reverse.templatetags.js_reverse import js_reverse_inline
28 from rest_framework.renderers import JSONRenderer
29 from six import iteritems
30
31 import kolibri
32 from kolibri.core.device.models import ContentCacheKey
33 from kolibri.core.hooks import NavigationHook
34 from kolibri.core.webpack.utils import webpack_asset_render
35 from kolibri.utils import conf
36 from kolibri.utils import i18n
37
38 register = template.Library()
39
40
41 @register.simple_tag()
42 def kolibri_content_cache_key():
43 js = """
44 <script>
45 var contentCacheKey = '{cache_key}';
46 </script>
47 """.format(
48 cache_key=ContentCacheKey.get_cache_key(),
49 )
50 return mark_safe(js)
51
52
53 def _supports_modern_fonts(request):
54 """
55 In order to use the modern font-loading strategy we need to ensure two things:
56
57 1. The browser needs to properly use the font-face unicode-range descriptor in order
58 only load fonts when they are needed. This allows us to reference fonts for every
59 supported alphabet while ensuring that the client doesn't download many megabytes
60 of font data.
61
62 2. The browser needs to avoid a flash of invisible text (FOIT) while extra fonts are
63 loading, and instead render text using the browser's default fonts (FOUT). This
64 allows users to view and begin reading text, even if the fonts haven't loaded yet.
65 With some browsers this means supporting the new font-display descriptor. The
66 Edge browser uses FOUT instead of FOIT by default, and therefore doesn't need to
67 support font-display.
68
69 Based on https://caniuse.com/#feat=font-unicode-range
70 """
71
72 if 'HTTP_USER_AGENT' not in request.META:
73 return False
74
75 browser = user_agents.parse(request.META['HTTP_USER_AGENT']).browser
76
77 if browser.family == "Edge": # Edge only needs unicode-range, not font-display
78 return browser.version[0] >= 17
79 if browser.family in ("Firefox", "Firefox Mobile"):
80 return browser.version[0] >= 58
81 if browser.family in ("Chrome", "Chrome Mobile"):
82 return browser.version[0] >= 60
83 if browser.family == "Safari":
84 return browser.version[0] >= 11 and browser.version[1] >= 1
85 if browser.family == "Opera":
86 return browser.version[0] >= 47
87 if browser.family == "Mobile Safari":
88 return browser.version[0] >= 11 and browser.version[1] >= 4
89
90 return False
91
92
93 @register.simple_tag(takes_context=True)
94 def kolibri_language_globals(context):
95
96 template = """
97 <script>
98 var languageCode = '{lang_code}';
99 var languageDir = '{lang_dir}';
100 var languages = JSON.parse('{languages}');
101 var useModernFontLoading = {use_modern};
102 </script>
103 <link type="text/css" href="{common_css_file}?v={version}" rel="stylesheet"/>
104 <link type="text/css" href="{subset_css_file}?v={version}" rel="stylesheet"/>
105 <link type="text/css" href="{full_css_file}?v={version}" rel="stylesheet"/>
106 """
107
108 language_code = get_language()
109 lang_dir = "rtl" if get_language_bidi() else "ltr"
110
111 languages = {}
112 for code, language_name in settings.LANGUAGES:
113 lang_info = next((lang for lang in i18n.KOLIBRI_SUPPORTED_LANGUAGES if lang['intl_code'] == code), None)
114 languages[code] = {
115 # Format to match the schema of the content Language model
116 "id": code,
117 "lang_name": language_name,
118 "english_name": lang_info["english_name"] if lang_info else get_language_info(code)["name"],
119 "lang_direction": get_language_info(code)["bidi"],
120 }
121
122 common_file = static("assets/fonts/noto-common.css")
123 subset_file = static("assets/fonts/noto-subset.{}.css".format(language_code))
124 is_modern = _supports_modern_fonts(context["request"])
125 full_file = static(
126 "assets/fonts/noto-full.{}.{}.css".format(
127 language_code, ("modern" if is_modern else "basic")
128 )
129 )
130
131 return mark_safe(
132 template.format(
133 lang_code=language_code,
134 lang_dir=lang_dir,
135 languages=json.dumps(languages),
136 use_modern="true" if is_modern else "false",
137 common_css_file=common_file,
138 subset_css_file=subset_file,
139 full_css_file=full_file,
140 # Temporary cache busting strategy.
141 # Would be better to use ManifestStaticFilesStorage
142 version=kolibri.__version__,
143 )
144 )
145
146
147 @register.simple_tag()
148 def kolibri_navigation_actions():
149 """
150 A tag to include an initial JS-object to bootstrap nav action data into the app.
151 :return: An html string
152 """
153 return webpack_asset_render(NavigationHook)
154
155
156 @register.simple_tag(takes_context=True)
157 def kolibri_set_urls(context):
158 js_global_object_name = getattr(settings, 'JS_REVERSE_JS_GLOBAL_OBJECT_NAME', JS_GLOBAL_OBJECT_NAME)
159 js_var_name = getattr(settings, 'JS_REVERSE_JS_VAR_NAME', JS_VAR_NAME)
160 js = (js_reverse_inline(context) +
161 """
162 Object.assign({kolibri}.urls, {global_object}.{js_var});
163 {kolibri}.urls.__staticURL = '{static_url}';
164 """.format(
165 kolibri=conf.KOLIBRI_CORE_JS_NAME,
166 global_object=js_global_object_name,
167 js_var=js_var_name,
168 static_url=settings.STATIC_URL))
169 return mark_safe(js)
170
171
172 @register.simple_tag()
173 def kolibri_set_server_time():
174 html = ("<script type='text/javascript'>"
175 "{0}.utils.serverClock.setServerTime({1});"
176 "</script>".format(conf.KOLIBRI_CORE_JS_NAME,
177 json.dumps(now(), cls=DjangoJSONEncoder)))
178 return mark_safe(html)
179
180
181 @register.simple_tag(takes_context=True)
182 def kolibri_bootstrap_model(context, base_name, api_resource, **kwargs):
183 response, kwargs = _kolibri_bootstrap_helper(context, base_name, api_resource, 'detail', **kwargs)
184 html = ("<script type='text/javascript'>"
185 "var model = {0}.resources.{1}.createModel(JSON.parse({2}));"
186 "model.synced = true;"
187 "</script>".format(
188 conf.KOLIBRI_CORE_JS_NAME,
189 api_resource,
190 json.dumps(JSONRenderer().render(response.data).decode('utf-8'))))
191 return mark_safe(html)
192
193
194 @register.simple_tag(takes_context=True)
195 def kolibri_bootstrap_collection(context, base_name, api_resource, **kwargs):
196 response, kwargs = _kolibri_bootstrap_helper(context, base_name, api_resource, 'list', **kwargs)
197 html = ("<script type='text/javascript'>"
198 "var collection = {0}.resources.{1}.createCollection({2}, JSON.parse({3}));"
199 "collection.synced = true;"
200 "</script>".format(conf.KOLIBRI_CORE_JS_NAME,
201 api_resource,
202 json.dumps(kwargs),
203 json.dumps(JSONRenderer().render(response.data).decode('utf-8')),
204 ))
205 return mark_safe(html)
206
207
208 def _replace_dict_values(check, replace, dict):
209 for (key, value) in iteritems(dict):
210 if dict[key] is check:
211 dict[key] = replace
212
213
214 def _kolibri_bootstrap_helper(context, base_name, api_resource, route, **kwargs):
215 reversal = dict()
216 kwargs_check = 'kwargs_'
217 # remove prepended string and matching items from kwargs
218 for key in list(kwargs.keys()):
219 if kwargs_check in key:
220 item = kwargs.pop(key)
221 key = re.sub(kwargs_check, '', key)
222 reversal[key] = item
223 view, view_args, view_kwargs = resolve(reverse('kolibri:core:{0}-{1}'.format(base_name, route), kwargs=reversal))
224 # switch out None temporarily because invalid filtering and caching can occur
225 _replace_dict_values(None, str(''), kwargs)
226 request = copy.copy(context['request'])
227 request.GET = request.GET.copy()
228 for key in kwargs:
229 request.GET[key] = kwargs[key]
230 response = view(request, **view_kwargs)
231 _replace_dict_values(str(''), None, kwargs)
232 return response, kwargs
233
[end of kolibri/core/templatetags/kolibri_tags.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kolibri/core/templatetags/kolibri_tags.py b/kolibri/core/templatetags/kolibri_tags.py
--- a/kolibri/core/templatetags/kolibri_tags.py
+++ b/kolibri/core/templatetags/kolibri_tags.py
@@ -157,10 +157,9 @@
def kolibri_set_urls(context):
js_global_object_name = getattr(settings, 'JS_REVERSE_JS_GLOBAL_OBJECT_NAME', JS_GLOBAL_OBJECT_NAME)
js_var_name = getattr(settings, 'JS_REVERSE_JS_VAR_NAME', JS_VAR_NAME)
- js = (js_reverse_inline(context) +
- """
+ js = (js_reverse_inline(context) + """
Object.assign({kolibri}.urls, {global_object}.{js_var});
- {kolibri}.urls.__staticURL = '{static_url}';
+ {kolibri}.urls.__setStaticURL('{static_url}');
""".format(
kolibri=conf.KOLIBRI_CORE_JS_NAME,
global_object=js_global_object_name,
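Reading the diff above: the only functional change is that the injected static URL now goes through a setter on the frontend `urls` object instead of a bare property assignment. Presumably this lets the JS module act on the value when it arrives (for example, to resolve asset paths such as `/user/kolibri-logo.svg` under the static root in built bundles). The Python stand-in below only illustrates that pattern and is not Kolibri code; names are placeholders.

```python
# Illustration only: a setter can normalize and propagate the value on arrival,
# which a plain attribute assignment cannot do.
class Urls:
    def __init__(self):
        self.static_url = "/"

    def set_static_url(self, url):  # stands in for __setStaticURL
        self.static_url = url if url.endswith("/") else url + "/"

    def static(self, path):
        return self.static_url + path.lstrip("/")


urls = Urls()
urls.set_static_url("/static/")
print(urls.static("kolibri-logo.svg"))  # -> /static/kolibri-logo.svg
```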
| {"golden_diff": "diff --git a/kolibri/core/templatetags/kolibri_tags.py b/kolibri/core/templatetags/kolibri_tags.py\n--- a/kolibri/core/templatetags/kolibri_tags.py\n+++ b/kolibri/core/templatetags/kolibri_tags.py\n@@ -157,10 +157,9 @@\n def kolibri_set_urls(context):\n js_global_object_name = getattr(settings, 'JS_REVERSE_JS_GLOBAL_OBJECT_NAME', JS_GLOBAL_OBJECT_NAME)\n js_var_name = getattr(settings, 'JS_REVERSE_JS_VAR_NAME', JS_VAR_NAME)\n- js = (js_reverse_inline(context) +\n- \"\"\"\n+ js = (js_reverse_inline(context) + \"\"\"\n Object.assign({kolibri}.urls, {global_object}.{js_var});\n- {kolibri}.urls.__staticURL = '{static_url}';\n+ {kolibri}.urls.__setStaticURL('{static_url}');\n \"\"\".format(\n kolibri=conf.KOLIBRI_CORE_JS_NAME,\n global_object=js_global_object_name,\n", "issue": "Some images are not found \n### Observed behavior\r\nIn the current Kolibri develop branch, some images are not found after building js bundle.\r\nI.e. \r\n`yarn devserver`works perfectly\r\n`yarn build & yarn django-devserver` gives a not-found error in the urls\r\n/user/kolibri-logo.svg\r\n/user/background.jpg\r\n\r\nIt looks like a path issue in the frontend\r\n\r\n### Expected behavior\r\nAll the images should be found when using the built bundles.\r\n\r\n### User-facing consequences\r\nBad look\r\n\r\n### Steps to reproduce\r\nUsing develop code\r\n`yarn build` will build the bundles\r\n`yarn django-devserver` will run only the Django webserver.\r\nBrowsing in Kolibri will be visible\r\n\r\n\r\n### Context\r\n* Kolibri develop branch\r\n* Tested with Google Chrome\r\n* OS: Linux\r\n\n", "before_files": [{"content": "\"\"\"\nKolibri template tags\n=====================\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport copy\nimport json\nimport re\n\nimport user_agents\nfrom django import template\nfrom django.conf import settings\nfrom django.contrib.staticfiles.templatetags.staticfiles import static\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.core.urlresolvers import resolve\nfrom django.core.urlresolvers import reverse\nfrom django.utils.html import mark_safe\nfrom django.utils.timezone import now\nfrom django.utils.translation import get_language\nfrom django.utils.translation import get_language_bidi\nfrom django.utils.translation import get_language_info\nfrom django_js_reverse.js_reverse_settings import JS_GLOBAL_OBJECT_NAME\nfrom django_js_reverse.js_reverse_settings import JS_VAR_NAME\nfrom django_js_reverse.templatetags.js_reverse import js_reverse_inline\nfrom rest_framework.renderers import JSONRenderer\nfrom six import iteritems\n\nimport kolibri\nfrom kolibri.core.device.models import ContentCacheKey\nfrom kolibri.core.hooks import NavigationHook\nfrom kolibri.core.webpack.utils import webpack_asset_render\nfrom kolibri.utils import conf\nfrom kolibri.utils import i18n\n\nregister = template.Library()\n\n\[email protected]_tag()\ndef kolibri_content_cache_key():\n js = \"\"\"\n <script>\n var contentCacheKey = '{cache_key}';\n </script>\n \"\"\".format(\n cache_key=ContentCacheKey.get_cache_key(),\n )\n return mark_safe(js)\n\n\ndef _supports_modern_fonts(request):\n \"\"\"\n In order to use the modern font-loading strategy we need to ensure two things:\n\n 1. The browser needs to properly use the font-face unicode-range descriptor in order\n only load fonts when they are needed. 
This allows us to reference fonts for every\n supported alphabet while ensuring that the client doesn't download many megabytes\n of font data.\n\n 2. The browser needs to avoid a flash of invisible text (FOIT) while extra fonts are\n loading, and instead render text using the browser's default fonts (FOUT). This\n allows users to view and begin reading text, even if the fonts haven't loaded yet.\n With some browsers this means supporting the new font-display descriptor. The\n Edge browser uses FOUT instead of FOIT by default, and therefore doesn't need to\n support font-display.\n\n Based on https://caniuse.com/#feat=font-unicode-range\n \"\"\"\n\n if 'HTTP_USER_AGENT' not in request.META:\n return False\n\n browser = user_agents.parse(request.META['HTTP_USER_AGENT']).browser\n\n if browser.family == \"Edge\": # Edge only needs unicode-range, not font-display\n return browser.version[0] >= 17\n if browser.family in (\"Firefox\", \"Firefox Mobile\"):\n return browser.version[0] >= 58\n if browser.family in (\"Chrome\", \"Chrome Mobile\"):\n return browser.version[0] >= 60\n if browser.family == \"Safari\":\n return browser.version[0] >= 11 and browser.version[1] >= 1\n if browser.family == \"Opera\":\n return browser.version[0] >= 47\n if browser.family == \"Mobile Safari\":\n return browser.version[0] >= 11 and browser.version[1] >= 4\n\n return False\n\n\[email protected]_tag(takes_context=True)\ndef kolibri_language_globals(context):\n\n template = \"\"\"\n <script>\n var languageCode = '{lang_code}';\n var languageDir = '{lang_dir}';\n var languages = JSON.parse('{languages}');\n var useModernFontLoading = {use_modern};\n </script>\n <link type=\"text/css\" href=\"{common_css_file}?v={version}\" rel=\"stylesheet\"/>\n <link type=\"text/css\" href=\"{subset_css_file}?v={version}\" rel=\"stylesheet\"/>\n <link type=\"text/css\" href=\"{full_css_file}?v={version}\" rel=\"stylesheet\"/>\n \"\"\"\n\n language_code = get_language()\n lang_dir = \"rtl\" if get_language_bidi() else \"ltr\"\n\n languages = {}\n for code, language_name in settings.LANGUAGES:\n lang_info = next((lang for lang in i18n.KOLIBRI_SUPPORTED_LANGUAGES if lang['intl_code'] == code), None)\n languages[code] = {\n # Format to match the schema of the content Language model\n \"id\": code,\n \"lang_name\": language_name,\n \"english_name\": lang_info[\"english_name\"] if lang_info else get_language_info(code)[\"name\"],\n \"lang_direction\": get_language_info(code)[\"bidi\"],\n }\n\n common_file = static(\"assets/fonts/noto-common.css\")\n subset_file = static(\"assets/fonts/noto-subset.{}.css\".format(language_code))\n is_modern = _supports_modern_fonts(context[\"request\"])\n full_file = static(\n \"assets/fonts/noto-full.{}.{}.css\".format(\n language_code, (\"modern\" if is_modern else \"basic\")\n )\n )\n\n return mark_safe(\n template.format(\n lang_code=language_code,\n lang_dir=lang_dir,\n languages=json.dumps(languages),\n use_modern=\"true\" if is_modern else \"false\",\n common_css_file=common_file,\n subset_css_file=subset_file,\n full_css_file=full_file,\n # Temporary cache busting strategy.\n # Would be better to use ManifestStaticFilesStorage\n version=kolibri.__version__,\n )\n )\n\n\[email protected]_tag()\ndef kolibri_navigation_actions():\n \"\"\"\n A tag to include an initial JS-object to bootstrap nav action data into the app.\n :return: An html string\n \"\"\"\n return webpack_asset_render(NavigationHook)\n\n\[email protected]_tag(takes_context=True)\ndef kolibri_set_urls(context):\n 
js_global_object_name = getattr(settings, 'JS_REVERSE_JS_GLOBAL_OBJECT_NAME', JS_GLOBAL_OBJECT_NAME)\n js_var_name = getattr(settings, 'JS_REVERSE_JS_VAR_NAME', JS_VAR_NAME)\n js = (js_reverse_inline(context) +\n \"\"\"\n Object.assign({kolibri}.urls, {global_object}.{js_var});\n {kolibri}.urls.__staticURL = '{static_url}';\n \"\"\".format(\n kolibri=conf.KOLIBRI_CORE_JS_NAME,\n global_object=js_global_object_name,\n js_var=js_var_name,\n static_url=settings.STATIC_URL))\n return mark_safe(js)\n\n\[email protected]_tag()\ndef kolibri_set_server_time():\n html = (\"<script type='text/javascript'>\"\n \"{0}.utils.serverClock.setServerTime({1});\"\n \"</script>\".format(conf.KOLIBRI_CORE_JS_NAME,\n json.dumps(now(), cls=DjangoJSONEncoder)))\n return mark_safe(html)\n\n\[email protected]_tag(takes_context=True)\ndef kolibri_bootstrap_model(context, base_name, api_resource, **kwargs):\n response, kwargs = _kolibri_bootstrap_helper(context, base_name, api_resource, 'detail', **kwargs)\n html = (\"<script type='text/javascript'>\"\n \"var model = {0}.resources.{1}.createModel(JSON.parse({2}));\"\n \"model.synced = true;\"\n \"</script>\".format(\n conf.KOLIBRI_CORE_JS_NAME,\n api_resource,\n json.dumps(JSONRenderer().render(response.data).decode('utf-8'))))\n return mark_safe(html)\n\n\[email protected]_tag(takes_context=True)\ndef kolibri_bootstrap_collection(context, base_name, api_resource, **kwargs):\n response, kwargs = _kolibri_bootstrap_helper(context, base_name, api_resource, 'list', **kwargs)\n html = (\"<script type='text/javascript'>\"\n \"var collection = {0}.resources.{1}.createCollection({2}, JSON.parse({3}));\"\n \"collection.synced = true;\"\n \"</script>\".format(conf.KOLIBRI_CORE_JS_NAME,\n api_resource,\n json.dumps(kwargs),\n json.dumps(JSONRenderer().render(response.data).decode('utf-8')),\n ))\n return mark_safe(html)\n\n\ndef _replace_dict_values(check, replace, dict):\n for (key, value) in iteritems(dict):\n if dict[key] is check:\n dict[key] = replace\n\n\ndef _kolibri_bootstrap_helper(context, base_name, api_resource, route, **kwargs):\n reversal = dict()\n kwargs_check = 'kwargs_'\n # remove prepended string and matching items from kwargs\n for key in list(kwargs.keys()):\n if kwargs_check in key:\n item = kwargs.pop(key)\n key = re.sub(kwargs_check, '', key)\n reversal[key] = item\n view, view_args, view_kwargs = resolve(reverse('kolibri:core:{0}-{1}'.format(base_name, route), kwargs=reversal))\n # switch out None temporarily because invalid filtering and caching can occur\n _replace_dict_values(None, str(''), kwargs)\n request = copy.copy(context['request'])\n request.GET = request.GET.copy()\n for key in kwargs:\n request.GET[key] = kwargs[key]\n response = view(request, **view_kwargs)\n _replace_dict_values(str(''), None, kwargs)\n return response, kwargs\n", "path": "kolibri/core/templatetags/kolibri_tags.py"}]} | 3,372 | 231 |
gh_patches_debug_34221 | rasdani/github-patches | git_diff | aws__aws-cli-4148 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
eks update-config - generates incorrect kube-config - missing aws-iam-authenticator command
Version: aws-cli/1.16.156 Python/2.7.5 Linux/3.10.0-957.12.1.el7.x86_64 botocore/1.12.146
kubectl : 1.12.7
iam-aws-authenticator: 1.12.7
```bash
aws --region eu-west-1 eks update-kubeconfig --name XXXXXX
```
it generates
```
apiVersion: client.authentication.k8s.io/v1alpha1
args:
- eks
- get-token
- --cluster-name
- XXXXXX
command: aws ---> it should be aws-iam-authenticator
```
Obviously, whatever the intent of the get-token approach, it should not break the previous mode,
or it should at least give me the choice:
```
apiVersion: client.authentication.k8s.io/v1alpha1
args:
- token
- -i
- xxxxx
command: aws-iam-authenticator
```
The above makes kubectl invoke the AWS CLI, but it results in:
```
You must specify a region. You can also configure your region by running "aws configure".
```
</issue>
<code>
[start of awscli/customizations/eks/update_kubeconfig.py]
1 # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13
14 import os
15 import logging
16
17 from botocore.compat import OrderedDict
18
19 from awscli.customizations.commands import BasicCommand
20 from awscli.customizations.utils import uni_print
21 from awscli.customizations.eks.exceptions import EKSClusterError
22 from awscli.customizations.eks.kubeconfig import (Kubeconfig,
23 KubeconfigError,
24 KubeconfigLoader,
25 KubeconfigWriter,
26 KubeconfigValidator,
27 KubeconfigAppender)
28 from awscli.customizations.eks.ordered_yaml import ordered_yaml_dump
29
30 LOG = logging.getLogger(__name__)
31
32 DEFAULT_PATH = os.path.expanduser("~/.kube/config")
33
34 # Use the endpoint for kubernetes 1.10
35 # To get the most recent endpoint we will need to
36 # Do a check on the cluster's version number
37 API_VERSION = "client.authentication.k8s.io/v1alpha1"
38
39 class UpdateKubeconfigCommand(BasicCommand):
40 NAME = 'update-kubeconfig'
41
42 DESCRIPTION = BasicCommand.FROM_FILE(
43 'eks',
44 'update-kubeconfig',
45 '_description.rst'
46 )
47
48 ARG_TABLE = [
49 {
50 'name': 'name',
51 'help_text': ("The name of the cluster for which "
52 "to create a kubeconfig entry. "
53 "This cluster must exist in your account and in the "
54 "specified or configured default Region "
55 "for your AWS CLI installation."),
56 'required': True
57 },
58 {
59 'name': 'kubeconfig',
60 'help_text': ("Optionally specify a kubeconfig file to append "
61 "with your configuration. "
62 "By default, the configuration is written to the "
63 "first file path in the KUBECONFIG "
64 "environment variable (if it is set) "
65 "or the default kubeconfig path (.kube/config) "
66 "in your home directory."),
67 'required': False
68 },
69 {
70 'name': 'role-arn',
71 'help_text': ("To assume a role for cluster authentication, "
72 "specify an IAM role ARN with this option. "
73 "For example, if you created a cluster "
74 "while assuming an IAM role, "
75 "then you must also assume that role to "
76 "connect to the cluster the first time."),
77 'required': False
78 },
79 {
80 'name': 'dry-run',
81 'action': 'store_true',
82 'default': False,
83 'help_text': ("Print the merged kubeconfig to stdout instead of "
84 "writing it to the specified file."),
85 'required': False
86 },
87 {
88 'name': 'verbose',
89 'action': 'store_true',
90 'default': False,
91 'help_text': ("Print more detailed output "
92 "when writing to the kubeconfig file, "
93 "including the appended entries.")
94 },
95 {
96 'name': 'alias',
97 'help_text': ("Alias for the cluster context name. "
98 "Defaults to match cluster ARN."),
99 'required': False
100 }
101 ]
102
103 def _display_entries(self, entries):
104 """
105 Display entries in yaml format
106
107 :param entries: a list of OrderedDicts to be printed
108 :type entries: list
109 """
110 uni_print("Entries:\n\n")
111 for entry in entries:
112 uni_print(ordered_yaml_dump(entry))
113 uni_print("\n")
114
115 def _run_main(self, parsed_args, parsed_globals):
116 client = EKSClient(self._session,
117 parsed_args.name,
118 parsed_args.role_arn,
119 parsed_globals)
120 new_cluster_dict = client.get_cluster_entry()
121 new_user_dict = client.get_user_entry()
122
123 config_selector = KubeconfigSelector(
124 os.environ.get("KUBECONFIG", ""),
125 parsed_args.kubeconfig
126 )
127 config = config_selector.choose_kubeconfig(
128 new_cluster_dict["name"]
129 )
130 updating_existing = config.has_cluster(new_cluster_dict["name"])
131 appender = KubeconfigAppender()
132 new_context_dict = appender.insert_cluster_user_pair(config,
133 new_cluster_dict,
134 new_user_dict,
135 parsed_args.alias)
136
137 if parsed_args.dry_run:
138 uni_print(config.dump_content())
139 else:
140 writer = KubeconfigWriter()
141 writer.write_kubeconfig(config)
142
143 if updating_existing:
144 uni_print("Updated context {0} in {1}\n".format(
145 new_context_dict["name"], config.path
146 ))
147 else:
148 uni_print("Added new context {0} to {1}\n".format(
149 new_context_dict["name"], config.path
150 ))
151
152 if parsed_args.verbose:
153 self._display_entries([
154 new_context_dict,
155 new_user_dict,
156 new_cluster_dict
157 ])
158
159
160
161 class KubeconfigSelector(object):
162
163 def __init__(self, env_variable, path_in, validator=None,
164 loader=None):
165 """
166 Parse KUBECONFIG into a list of absolute paths.
167 Also replace the empty list with DEFAULT_PATH
168
169 :param env_variable: KUBECONFIG as a long string
170 :type env_variable: string
171
172 :param path_in: The path passed in through the CLI
173 :type path_in: string or None
174 """
175 if validator is None:
176 validator = KubeconfigValidator()
177 self._validator = validator
178
179 if loader is None:
180 loader = KubeconfigLoader(validator)
181 self._loader = loader
182
183 if path_in is not None:
184 # Override environment variable
185 self._paths = [self._expand_path(path_in)]
186 else:
187 # Get the list of paths from the environment variable
188 if env_variable == "":
189 env_variable = DEFAULT_PATH
190 self._paths = [self._expand_path(element)
191 for element in env_variable.split(os.pathsep)
192 if len(element.strip()) > 0]
193 if len(self._paths) == 0:
194 self._paths = [DEFAULT_PATH]
195
196 def choose_kubeconfig(self, cluster_name):
197 """
198 Choose which kubeconfig file to read from.
199 If name is already an entry in one of the $KUBECONFIG files,
200 choose that one.
201 Otherwise choose the first file.
202
203 :param cluster_name: The name of the cluster which is going to be added
204 :type cluster_name: String
205
206 :return: a chosen Kubeconfig based on above rules
207 :rtype: Kubeconfig
208 """
209 # Search for an existing entry to update
210 for candidate_path in self._paths:
211 try:
212 loaded_config = self._loader.load_kubeconfig(candidate_path)
213
214 if loaded_config.has_cluster(cluster_name):
215 LOG.debug("Found entry to update at {0}".format(
216 candidate_path
217 ))
218 return loaded_config
219 except KubeconfigError as e:
220 LOG.warning("Passing {0}:{1}".format(candidate_path, e))
221
222 # No entry was found, use the first file in KUBECONFIG
223 #
224 # Note: This could raise KubeconfigErrors if paths[0] is corrupted
225 return self._loader.load_kubeconfig(self._paths[0])
226
227 def _expand_path(self, path):
228 """ A helper to expand a path to a full absolute path. """
229 return os.path.abspath(os.path.expanduser(path))
230
231
232 class EKSClient(object):
233 def __init__(self, session, cluster_name, role_arn, parsed_globals=None):
234 self._session = session
235 self._cluster_name = cluster_name
236 self._role_arn = role_arn
237 self._cluster_description = None
238 self._globals = parsed_globals
239
240 def _get_cluster_description(self):
241 """
242 Use an eks describe-cluster call to get the cluster description
243 Cache the response in self._cluster_description.
244 describe-cluster will only be called once.
245 """
246 if self._cluster_description is None:
247 if self._globals is None:
248 client = self._session.create_client("eks")
249 else:
250 client = self._session.create_client(
251 "eks",
252 region_name=self._globals.region,
253 endpoint_url=self._globals.endpoint_url,
254 verify=self._globals.verify_ssl
255 )
256 full_description = client.describe_cluster(name=self._cluster_name)
257 self._cluster_description = full_description["cluster"]
258
259 if "status" not in self._cluster_description:
260 raise EKSClusterError("Cluster not found")
261 if self._cluster_description["status"] != "ACTIVE":
262 raise EKSClusterError("Cluster status not active")
263
264 return self._cluster_description
265
266 def get_cluster_entry(self):
267 """
268 Return a cluster entry generated using
269 the previously obtained description.
270 """
271
272 cert_data = self._get_cluster_description().get("certificateAuthority",
273 {"data": ""})["data"]
274 endpoint = self._get_cluster_description().get("endpoint")
275 arn = self._get_cluster_description().get("arn")
276
277 return OrderedDict([
278 ("cluster", OrderedDict([
279 ("certificate-authority-data", cert_data),
280 ("server", endpoint)
281 ])),
282 ("name", arn)
283 ])
284
285 def get_user_entry(self):
286 """
287 Return a user entry generated using
288 the previously obtained description.
289 """
290 generated_user = OrderedDict([
291 ("name", self._get_cluster_description().get("arn", "")),
292 ("user", OrderedDict([
293 ("exec", OrderedDict([
294 ("apiVersion", API_VERSION),
295 ("args",
296 [
297 "eks",
298 "get-token",
299 "--cluster-name",
300 self._cluster_name
301 ]),
302 ("command", "aws")
303 ]))
304 ]))
305 ])
306
307 if self._role_arn is not None:
308 generated_user["user"]["exec"]["args"].extend([
309 "--role",
310 self._role_arn
311 ])
312
313 if self._session.profile:
314 generated_user["user"]["exec"]["env"] = [OrderedDict([
315 ("name", "AWS_PROFILE"),
316 ("value", self._session.profile)
317 ])]
318
319 return generated_user
320
[end of awscli/customizations/eks/update_kubeconfig.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/awscli/customizations/eks/update_kubeconfig.py b/awscli/customizations/eks/update_kubeconfig.py
--- a/awscli/customizations/eks/update_kubeconfig.py
+++ b/awscli/customizations/eks/update_kubeconfig.py
@@ -101,7 +101,7 @@
]
def _display_entries(self, entries):
- """
+ """
Display entries in yaml format
:param entries: a list of OrderedDicts to be printed
@@ -248,7 +248,7 @@
client = self._session.create_client("eks")
else:
client = self._session.create_client(
- "eks",
+ "eks",
region_name=self._globals.region,
endpoint_url=self._globals.endpoint_url,
verify=self._globals.verify_ssl
@@ -287,6 +287,9 @@
Return a user entry generated using
the previously obtained description.
"""
+
+ region = self._get_cluster_description().get("arn").split(":")[3]
+
generated_user = OrderedDict([
("name", self._get_cluster_description().get("arn", "")),
("user", OrderedDict([
@@ -294,10 +297,12 @@
("apiVersion", API_VERSION),
("args",
[
+ "--region",
+ region,
"eks",
"get-token",
"--cluster-name",
- self._cluster_name
+ self._cluster_name,
]),
("command", "aws")
]))
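One detail of the fix worth spelling out: the `--region` added to the exec args is not read from local AWS configuration at all; it is recovered from the cluster ARN returned by `describe-cluster` via `arn.split(":")[3]`. A tiny illustration with a made-up ARN (the account id and cluster name are placeholders):

```python
# Illustrative only; ARNs follow arn:partition:service:region:account:resource.
arn = "arn:aws:eks:eu-west-1:123456789012:cluster/XXXXXX"
region = arn.split(":")[3]
print(region)  # eu-west-1
```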
| {"golden_diff": "diff --git a/awscli/customizations/eks/update_kubeconfig.py b/awscli/customizations/eks/update_kubeconfig.py\n--- a/awscli/customizations/eks/update_kubeconfig.py\n+++ b/awscli/customizations/eks/update_kubeconfig.py\n@@ -101,7 +101,7 @@\n ]\n \n def _display_entries(self, entries):\n- \"\"\" \n+ \"\"\"\n Display entries in yaml format\n \n :param entries: a list of OrderedDicts to be printed\n@@ -248,7 +248,7 @@\n client = self._session.create_client(\"eks\")\n else:\n client = self._session.create_client(\n- \"eks\", \n+ \"eks\",\n region_name=self._globals.region,\n endpoint_url=self._globals.endpoint_url,\n verify=self._globals.verify_ssl\n@@ -287,6 +287,9 @@\n Return a user entry generated using\n the previously obtained description.\n \"\"\"\n+\n+ region = self._get_cluster_description().get(\"arn\").split(\":\")[3]\n+\n generated_user = OrderedDict([\n (\"name\", self._get_cluster_description().get(\"arn\", \"\")),\n (\"user\", OrderedDict([\n@@ -294,10 +297,12 @@\n (\"apiVersion\", API_VERSION),\n (\"args\",\n [\n+ \"--region\",\n+ region,\n \"eks\",\n \"get-token\",\n \"--cluster-name\",\n- self._cluster_name\n+ self._cluster_name,\n ]),\n (\"command\", \"aws\")\n ]))\n", "issue": "eks update-config - generates incorrect kube-config - missing aws-iam-authenticator command\nVersion: aws-cli/1.16.156 Python/2.7.5 Linux/3.10.0-957.12.1.el7.x86_64 botocore/1.12.146\r\nkubectl : 1.12.7\r\niam-aws-authenticator: 1.12.7\r\n\r\n\r\n```bash\r\naws --region eu-west-1 eks update-kubeconfig --name XXXXXX\r\n```\r\n\r\nit generates \r\n\r\n```\r\n apiVersion: client.authentication.k8s.io/v1alpha1\r\n args:\r\n - eks\r\n - get-token\r\n - --cluster-name\r\n - XXXXXX\r\n command: aws ---> it should be aws-iam-authenticator\r\n```\r\n\r\nObviously whatever the attempt with the get-token - it should not break the previous mode\r\nor give me the choice\r\n```\r\n apiVersion: client.authentication.k8s.io/v1alpha1\r\n args:\r\n - token\r\n - -i\r\n - xxxxx\r\n command: aws-iam-authenticator\r\n```\r\nThe above makes kubectl invoke the aws cli but results to\r\n\r\n```\r\nYou must specify a region. You can also configure your region by running \"aws configure\".\r\n```\n", "before_files": [{"content": "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific\n# language governing permissions and limitations under the License.\n\nimport os\nimport logging\n\nfrom botocore.compat import OrderedDict\n\nfrom awscli.customizations.commands import BasicCommand\nfrom awscli.customizations.utils import uni_print\nfrom awscli.customizations.eks.exceptions import EKSClusterError\nfrom awscli.customizations.eks.kubeconfig import (Kubeconfig,\n KubeconfigError,\n KubeconfigLoader,\n KubeconfigWriter,\n KubeconfigValidator,\n KubeconfigAppender)\nfrom awscli.customizations.eks.ordered_yaml import ordered_yaml_dump\n\nLOG = logging.getLogger(__name__)\n\nDEFAULT_PATH = os.path.expanduser(\"~/.kube/config\")\n\n# Use the endpoint for kubernetes 1.10\n# To get the most recent endpoint we will need to\n# Do a check on the cluster's version number\nAPI_VERSION = \"client.authentication.k8s.io/v1alpha1\"\n\nclass UpdateKubeconfigCommand(BasicCommand):\n NAME = 'update-kubeconfig'\n\n DESCRIPTION = BasicCommand.FROM_FILE(\n 'eks',\n 'update-kubeconfig',\n '_description.rst'\n )\n\n ARG_TABLE = [\n {\n 'name': 'name',\n 'help_text': (\"The name of the cluster for which \"\n \"to create a kubeconfig entry. \"\n \"This cluster must exist in your account and in the \"\n \"specified or configured default Region \"\n \"for your AWS CLI installation.\"),\n 'required': True\n },\n {\n 'name': 'kubeconfig',\n 'help_text': (\"Optionally specify a kubeconfig file to append \"\n \"with your configuration. \"\n \"By default, the configuration is written to the \"\n \"first file path in the KUBECONFIG \"\n \"environment variable (if it is set) \"\n \"or the default kubeconfig path (.kube/config) \"\n \"in your home directory.\"),\n 'required': False\n },\n {\n 'name': 'role-arn',\n 'help_text': (\"To assume a role for cluster authentication, \"\n \"specify an IAM role ARN with this option. \"\n \"For example, if you created a cluster \"\n \"while assuming an IAM role, \"\n \"then you must also assume that role to \"\n \"connect to the cluster the first time.\"),\n 'required': False\n },\n {\n 'name': 'dry-run',\n 'action': 'store_true',\n 'default': False,\n 'help_text': (\"Print the merged kubeconfig to stdout instead of \"\n \"writing it to the specified file.\"),\n 'required': False\n },\n {\n 'name': 'verbose',\n 'action': 'store_true',\n 'default': False,\n 'help_text': (\"Print more detailed output \"\n \"when writing to the kubeconfig file, \"\n \"including the appended entries.\")\n },\n {\n 'name': 'alias',\n 'help_text': (\"Alias for the cluster context name. 
\"\n \"Defaults to match cluster ARN.\"),\n 'required': False\n }\n ]\n\n def _display_entries(self, entries):\n \"\"\" \n Display entries in yaml format\n\n :param entries: a list of OrderedDicts to be printed\n :type entries: list\n \"\"\"\n uni_print(\"Entries:\\n\\n\")\n for entry in entries:\n uni_print(ordered_yaml_dump(entry))\n uni_print(\"\\n\")\n\n def _run_main(self, parsed_args, parsed_globals):\n client = EKSClient(self._session,\n parsed_args.name,\n parsed_args.role_arn,\n parsed_globals)\n new_cluster_dict = client.get_cluster_entry()\n new_user_dict = client.get_user_entry()\n\n config_selector = KubeconfigSelector(\n os.environ.get(\"KUBECONFIG\", \"\"),\n parsed_args.kubeconfig\n )\n config = config_selector.choose_kubeconfig(\n new_cluster_dict[\"name\"]\n )\n updating_existing = config.has_cluster(new_cluster_dict[\"name\"])\n appender = KubeconfigAppender()\n new_context_dict = appender.insert_cluster_user_pair(config,\n new_cluster_dict,\n new_user_dict,\n parsed_args.alias)\n\n if parsed_args.dry_run:\n uni_print(config.dump_content())\n else:\n writer = KubeconfigWriter()\n writer.write_kubeconfig(config)\n\n if updating_existing:\n uni_print(\"Updated context {0} in {1}\\n\".format(\n new_context_dict[\"name\"], config.path\n ))\n else:\n uni_print(\"Added new context {0} to {1}\\n\".format(\n new_context_dict[\"name\"], config.path\n ))\n\n if parsed_args.verbose:\n self._display_entries([\n new_context_dict,\n new_user_dict,\n new_cluster_dict\n ])\n\n\n\nclass KubeconfigSelector(object):\n\n def __init__(self, env_variable, path_in, validator=None,\n loader=None):\n \"\"\"\n Parse KUBECONFIG into a list of absolute paths.\n Also replace the empty list with DEFAULT_PATH\n\n :param env_variable: KUBECONFIG as a long string\n :type env_variable: string\n\n :param path_in: The path passed in through the CLI\n :type path_in: string or None\n \"\"\"\n if validator is None:\n validator = KubeconfigValidator()\n self._validator = validator\n\n if loader is None:\n loader = KubeconfigLoader(validator)\n self._loader = loader\n\n if path_in is not None:\n # Override environment variable\n self._paths = [self._expand_path(path_in)]\n else:\n # Get the list of paths from the environment variable\n if env_variable == \"\":\n env_variable = DEFAULT_PATH\n self._paths = [self._expand_path(element)\n for element in env_variable.split(os.pathsep)\n if len(element.strip()) > 0]\n if len(self._paths) == 0:\n self._paths = [DEFAULT_PATH]\n\n def choose_kubeconfig(self, cluster_name):\n \"\"\"\n Choose which kubeconfig file to read from.\n If name is already an entry in one of the $KUBECONFIG files,\n choose that one.\n Otherwise choose the first file.\n\n :param cluster_name: The name of the cluster which is going to be added\n :type cluster_name: String\n\n :return: a chosen Kubeconfig based on above rules\n :rtype: Kubeconfig\n \"\"\"\n # Search for an existing entry to update\n for candidate_path in self._paths:\n try:\n loaded_config = self._loader.load_kubeconfig(candidate_path)\n\n if loaded_config.has_cluster(cluster_name):\n LOG.debug(\"Found entry to update at {0}\".format(\n candidate_path\n ))\n return loaded_config\n except KubeconfigError as e:\n LOG.warning(\"Passing {0}:{1}\".format(candidate_path, e))\n\n # No entry was found, use the first file in KUBECONFIG\n #\n # Note: This could raise KubeconfigErrors if paths[0] is corrupted\n return self._loader.load_kubeconfig(self._paths[0])\n\n def _expand_path(self, path):\n \"\"\" A helper to expand a path to a full 
absolute path. \"\"\"\n return os.path.abspath(os.path.expanduser(path))\n\n\nclass EKSClient(object):\n def __init__(self, session, cluster_name, role_arn, parsed_globals=None):\n self._session = session\n self._cluster_name = cluster_name\n self._role_arn = role_arn\n self._cluster_description = None\n self._globals = parsed_globals\n\n def _get_cluster_description(self):\n \"\"\"\n Use an eks describe-cluster call to get the cluster description\n Cache the response in self._cluster_description.\n describe-cluster will only be called once.\n \"\"\"\n if self._cluster_description is None:\n if self._globals is None:\n client = self._session.create_client(\"eks\")\n else:\n client = self._session.create_client(\n \"eks\", \n region_name=self._globals.region,\n endpoint_url=self._globals.endpoint_url,\n verify=self._globals.verify_ssl\n )\n full_description = client.describe_cluster(name=self._cluster_name)\n self._cluster_description = full_description[\"cluster\"]\n\n if \"status\" not in self._cluster_description:\n raise EKSClusterError(\"Cluster not found\")\n if self._cluster_description[\"status\"] != \"ACTIVE\":\n raise EKSClusterError(\"Cluster status not active\")\n\n return self._cluster_description\n\n def get_cluster_entry(self):\n \"\"\"\n Return a cluster entry generated using\n the previously obtained description.\n \"\"\"\n\n cert_data = self._get_cluster_description().get(\"certificateAuthority\",\n {\"data\": \"\"})[\"data\"]\n endpoint = self._get_cluster_description().get(\"endpoint\")\n arn = self._get_cluster_description().get(\"arn\")\n\n return OrderedDict([\n (\"cluster\", OrderedDict([\n (\"certificate-authority-data\", cert_data),\n (\"server\", endpoint)\n ])),\n (\"name\", arn)\n ])\n\n def get_user_entry(self):\n \"\"\"\n Return a user entry generated using\n the previously obtained description.\n \"\"\"\n generated_user = OrderedDict([\n (\"name\", self._get_cluster_description().get(\"arn\", \"\")),\n (\"user\", OrderedDict([\n (\"exec\", OrderedDict([\n (\"apiVersion\", API_VERSION),\n (\"args\",\n [\n \"eks\",\n \"get-token\",\n \"--cluster-name\",\n self._cluster_name\n ]),\n (\"command\", \"aws\")\n ]))\n ]))\n ])\n\n if self._role_arn is not None:\n generated_user[\"user\"][\"exec\"][\"args\"].extend([\n \"--role\",\n self._role_arn\n ])\n\n if self._session.profile:\n generated_user[\"user\"][\"exec\"][\"env\"] = [OrderedDict([\n (\"name\", \"AWS_PROFILE\"),\n (\"value\", self._session.profile)\n ])]\n\n return generated_user\n", "path": "awscli/customizations/eks/update_kubeconfig.py"}]} | 3,971 | 341 |
gh_patches_debug_28304 | rasdani/github-patches | git_diff | canonical__microk8s-4015 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Connection failed. The joining node is already known to dqlite.
<!--
Thank you for submitting an issue. Please fill in the template below
information about the bug you encountered.
-->
#### Summary
If joining a new node fails for any reason, the joining node still gets added to dqlite, and the cluster has to be fully shut down to remediate.
#### What Should Happen Instead?
Either `microk8s remove-node (broken ip)` should work, or there should be a less impactful way to remediate.
#### Reproduction Steps
Have not tried reproducing.
But on a production cluster running 1.25, I provisioned a new server, added microk8s, and ran `microk8s add-node` followed by join.
I failed to specify the correct channel, so the new node, now running 1.26, failed to join the cluster.
Not a problem, let me remove microk8s, and install the correct channel.
```
Contacting cluster at 10.100.5.100
Connection failed. The joining node (10.100.5.102) is already known to dqlite (504).
```
https://microk8s.io/docs/restore-quorum
Is supposedly the method needed to resolve this issue; however, it is highly impactful to workloads, as the entire cluster has to be shut down.
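For context on what a less impactful check could look like, the sketch below cross-references the dqlite membership list with the Kubernetes nodes' InternalIP addresses to spot leftovers from a failed join. It is an illustration only, not MicroK8s code; the PyYAML dependency and the hard-coded wrapper path are assumptions.
```python
# Illustrative only: cross-check dqlite membership against Kubernetes nodes.
# Assumes PyYAML is installed and the snap paths below match the install.
import json
import subprocess

import yaml

CLUSTER_YAML = "/var/snap/microk8s/current/var/kubernetes/backend/cluster.yaml"
KUBECTL = "/snap/microk8s/current/microk8s-kubectl.wrapper"


def stale_dqlite_endpoints():
    with open(CLUSTER_YAML) as f:
        members = yaml.safe_load(f)  # list of {Address, ID, Role} entries

    nodes = json.loads(subprocess.check_output([KUBECTL, "get", "no", "-o", "json"]))
    known_ips = {
        addr["address"]
        for node in nodes["items"]
        for addr in node["status"]["addresses"]
        if addr["type"] == "InternalIP"
    }

    # Any dqlite member whose IP has no matching Kubernetes node is a
    # leftover from a failed join, like 10.100.5.102 above.
    return [m["Address"] for m in members if m["Address"].split(":")[0] not in known_ips]


if __name__ == "__main__":
    print(stale_dqlite_endpoints())
```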
#### Related Tickets
https://github.com/canonical/microk8s/issues/2212
#### A few more details
```
root@kube01:~/Kubernetes/namespaces/readymotorsports.net# cat /var/snap/microk8s/current/var/kubernetes/backend/cluster.yaml
- Address: 10.100.5.105:19001
ID: 3297041220608546238
Role: 0
- Address: 10.100.5.100:19001
ID: 6578832437747111275
Role: 0
- Address: 10.100.5.106:19001
ID: 16212301467739305109
Role: 0
- Address: 10.100.5.102:19001
ID: 17023170999447087085
Role: 1
root@kube01:~/Kubernetes/namespaces/readymotorsports.net# microk8s remove-node 10.100.5.102
Error from server (NotFound): nodes "10.100.5.102" not found
Node 10.100.5.102 does not exist in Kubernetes.
root@kube01:~/Kubernetes/namespaces/readymotorsports.net# kubectl get node -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
kube06 Ready <none> 83d v1.25.5 10.100.5.106 <none> Ubuntu 22.04.1 LTS 5.15.0-58-generic containerd://1.6.8
kube01 Ready control-plane,master 95d v1.25.5 10.100.5.100 <none> Ubuntu 22.04.1 LTS 5.15.0-58-generic containerd://1.6.8
kube05 Ready <none> 108d v1.25.5 10.100.5.105 <none> Ubuntu 22.04.1 LTS 5.15.0-58-generic containerd://1.6.8
```
</issue>
<code>
[start of scripts/wrappers/remove_node.py]
1 #!/usr/bin/python3
2 import json
3 import os
4 import shutil
5 import subprocess
6 import sys
7
8 import click
9 import netifaces
10
11 from common.cluster.utils import (
12 try_set_file_permissions,
13 is_node_running_dqlite,
14 )
15
16 snapdata_path = os.environ.get("SNAP_DATA")
17 snap_path = os.environ.get("SNAP")
18 callback_tokens_file = "{}/credentials/callback-tokens.txt".format(snapdata_path)
19
20 cluster_dir = "{}/var/kubernetes/backend".format(snapdata_path)
21
22
23 def remove_dqlite_node(node, force=False):
24 try:
25 # Make sure this node exists
26 node_info = subprocess.check_output(
27 "{}/microk8s-kubectl.wrapper get no {} -o json".format(snap_path, node).split()
28 )
29 info = json.loads(node_info.decode())
30 node_address = None
31 for a in info["status"]["addresses"]:
32 if a["type"] == "InternalIP":
33 node_address = a["address"]
34 break
35
36 if not node_address:
37 print("Node {} is not part of the cluster.".format(node))
38 exit(1)
39
40 node_ep = None
41 my_ep, other_ep = get_dqlite_endpoints()
42 for ep in other_ep:
43 if ep.startswith("{}:".format(node_address)):
44 node_ep = ep
45
46 if node_ep and force:
47 delete_dqlite_node([node_ep], my_ep)
48 elif node_ep and not force:
49 print(
50 "Removal failed. Node {} is registered with dqlite. "
51 "Please, run first 'microk8s leave' on the departing node. \n"
52 "If the node is not available anymore and will never attempt to join the cluster "
53 "in the future use the '--force' flag \n"
54 "to unregister the node while removing it.".format(node)
55 )
56 exit(1)
57
58 except subprocess.CalledProcessError:
59 print("Node {} does not exist in Kubernetes.".format(node))
60 if force:
61 print("Attempting to remove {} from dqlite.".format(node))
62 # Make sure we do not have the node in dqlite.
63 # We assume the IP is provided to denote the
64 my_ep, other_ep = get_dqlite_endpoints()
65 for ep in other_ep:
66 if ep.startswith("{}:".format(node)):
67 print("Removing node entry found in dqlite.")
68 delete_dqlite_node([ep], my_ep)
69 exit(1)
70
71 remove_node(node)
72
73
74 def remove_node(node):
75 try:
76 # Make sure this node exists
77 subprocess.check_call(
78 "{}/microk8s-kubectl.wrapper get no {}".format(snap_path, node).split(),
79 stdout=subprocess.DEVNULL,
80 stderr=subprocess.DEVNULL,
81 )
82 except subprocess.CalledProcessError:
83 print("Node {} does not exist.".format(node))
84 exit(1)
85
86 remove_kubelet_token(node)
87 remove_callback_token(node)
88 subprocess.check_call(
89 "{}/microk8s-kubectl.wrapper delete no {}".format(snap_path, node).split(),
90 stdout=subprocess.DEVNULL,
91 stderr=subprocess.DEVNULL,
92 )
93
94
95 def remove_kubelet_token(node):
96 """
97 Remove a token for a node in the known tokens
98
99 :param node: the name of the node
100 """
101 file = "{}/credentials/known_tokens.csv".format(snapdata_path)
102 backup_file = "{}.backup".format(file)
103 token = "system:node:{}".format(node)
104 # That is a critical section. We need to protect it.
105 with open(backup_file, "w") as back_fp:
106 with open(file, "r") as fp:
107 for _, line in enumerate(fp):
108 if token in line:
109 continue
110 back_fp.write("{}".format(line))
111
112 try_set_file_permissions(backup_file)
113 shutil.copyfile(backup_file, file)
114
115
116 def get_dqlite_endpoints():
117 """
118 Return the endpoints the current node has on dqlite and the endpoints of the rest of the nodes.
119
120 :return: two lists with the endpoints
121 """
122 out = subprocess.check_output(
123 "{snappath}/bin/dqlite -s file://{dbdir}/cluster.yaml -c {dbdir}/cluster.crt "
124 "-k {dbdir}/cluster.key -f json k8s .cluster".format(
125 snappath=snap_path, dbdir=cluster_dir
126 ).split()
127 )
128 data = json.loads(out.decode())
129 ep_addresses = []
130 for ep in data:
131 ep_addresses.append(ep["Address"])
132 local_ips = []
133 for interface in netifaces.interfaces():
134 if netifaces.AF_INET not in netifaces.ifaddresses(interface):
135 continue
136 for link in netifaces.ifaddresses(interface)[netifaces.AF_INET]:
137 local_ips.append(link["addr"])
138 my_ep = []
139 other_ep = []
140 for ep in ep_addresses:
141 found = False
142 for ip in local_ips:
143 if "{}:".format(ip) in ep:
144 my_ep.append(ep)
145 found = True
146 if not found:
147 other_ep.append(ep)
148
149 return my_ep, other_ep
150
151
152 def delete_dqlite_node(delete_node, dqlite_ep):
153 if len(delete_node) > 0 and "127.0.0.1" not in delete_node[0]:
154 for ep in dqlite_ep:
155 try:
156 cmd = (
157 "{snappath}/bin/dqlite -s file://{dbdir}/cluster.yaml -c {dbdir}/cluster.crt "
158 "-k {dbdir}/cluster.key -f json k8s".format(
159 snappath=snap_path, dbdir=cluster_dir
160 ).split()
161 )
162 cmd.append(".remove {}".format(delete_node[0]))
163 subprocess.check_output(cmd)
164 break
165 except Exception as err:
166 print("Contacting node {} failed. Error:".format(ep))
167 print(repr(err))
168 exit(2)
169
170
171 def remove_callback_token(node):
172 """
173 Remove a callback token
174
175 :param node: the node
176 """
177 tmp_file = "{}.tmp".format(callback_tokens_file)
178 if not os.path.isfile(callback_tokens_file):
179 open(callback_tokens_file, "a+")
180 os.chmod(callback_tokens_file, 0o600)
181 with open(tmp_file, "w") as backup_fp:
182 os.chmod(tmp_file, 0o600)
183 with open(callback_tokens_file, "r+") as callback_fp:
184 # Entries are of the format: 'node_hostname:agent_port token'
185 # We need to get the node_hostname part
186 for line in callback_fp:
187 parts = line.split(":")
188 if parts[0] == node:
189 continue
190 else:
191 backup_fp.write(line)
192
193 try_set_file_permissions(tmp_file)
194 shutil.move(tmp_file, callback_tokens_file)
195
196
197 @click.command()
198 @click.argument("node", required=True)
199 @click.option(
200 "--force",
201 is_flag=True,
202 required=False,
203 default=False,
204 help="Force the node removal operation. (default: false)",
205 )
206 def reset(node, force):
207 """
208 Remove a node from the cluster
209 """
210 if is_node_running_dqlite():
211 remove_dqlite_node(node, force)
212 else:
213 remove_node(node)
214 sys.exit(0)
215
216
217 if __name__ == "__main__":
218 reset(prog_name="microk8s remove-node")
219
[end of scripts/wrappers/remove_node.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scripts/wrappers/remove_node.py b/scripts/wrappers/remove_node.py
--- a/scripts/wrappers/remove_node.py
+++ b/scripts/wrappers/remove_node.py
@@ -8,13 +8,15 @@
import click
import netifaces
+from ipaddress import ip_address, IPv4Address
+
from common.cluster.utils import (
try_set_file_permissions,
is_node_running_dqlite,
)
-snapdata_path = os.environ.get("SNAP_DATA")
snap_path = os.environ.get("SNAP")
+snapdata_path = os.environ.get("SNAP_DATA")
callback_tokens_file = "{}/credentials/callback-tokens.txt".format(snapdata_path)
cluster_dir = "{}/var/kubernetes/backend".format(snapdata_path)
@@ -22,6 +24,22 @@
def remove_dqlite_node(node, force=False):
try:
+ # If node is an IP address, find the node name.
+ if type(ip_address(node)) is IPv4Address:
+ node_info = subprocess.check_output(
+ "{}/microk8s-kubectl.wrapper get no -o json".format(snap_path).split()
+ )
+ info = json.loads(node_info.decode())
+ found = False
+ for n in info["items"]:
+ if found:
+ break
+ for a in n["status"]["addresses"]:
+ if a["type"] == "InternalIP" and a["address"] == node:
+ node = n["metadata"]["name"]
+ found = True
+ break
+
# Make sure this node exists
node_info = subprocess.check_output(
"{}/microk8s-kubectl.wrapper get no {} -o json".format(snap_path, node).split()
| {"golden_diff": "diff --git a/scripts/wrappers/remove_node.py b/scripts/wrappers/remove_node.py\n--- a/scripts/wrappers/remove_node.py\n+++ b/scripts/wrappers/remove_node.py\n@@ -8,13 +8,15 @@\n import click\n import netifaces\n \n+from ipaddress import ip_address, IPv4Address\n+\n from common.cluster.utils import (\n try_set_file_permissions,\n is_node_running_dqlite,\n )\n \n-snapdata_path = os.environ.get(\"SNAP_DATA\")\n snap_path = os.environ.get(\"SNAP\")\n+snapdata_path = os.environ.get(\"SNAP_DATA\")\n callback_tokens_file = \"{}/credentials/callback-tokens.txt\".format(snapdata_path)\n \n cluster_dir = \"{}/var/kubernetes/backend\".format(snapdata_path)\n@@ -22,6 +24,22 @@\n \n def remove_dqlite_node(node, force=False):\n try:\n+ # If node is an IP address, find the node name.\n+ if type(ip_address(node)) is IPv4Address:\n+ node_info = subprocess.check_output(\n+ \"{}/microk8s-kubectl.wrapper get no -o json\".format(snap_path).split()\n+ )\n+ info = json.loads(node_info.decode())\n+ found = False\n+ for n in info[\"items\"]:\n+ if found:\n+ break\n+ for a in n[\"status\"][\"addresses\"]:\n+ if a[\"type\"] == \"InternalIP\" and a[\"address\"] == node:\n+ node = n[\"metadata\"][\"name\"]\n+ found = True\n+ break\n+\n # Make sure this node exists\n node_info = subprocess.check_output(\n \"{}/microk8s-kubectl.wrapper get no {} -o json\".format(snap_path, node).split()\n", "issue": "Connection failed. The joining node is already known to dqlite.\n<!--\r\n Thank you for submitting an issue. Please fill in the template below\r\n information about the bug you encountered.\r\n-->\r\n\r\n#### Summary\r\nIf, joining a new node fails for any reason, the joining node gets added to .dqlite, and the cluster has to be fully shutdown to remediate.\r\n\r\n#### What Should Happen Instead?\r\nEither, `microk8s remove-node (broken ip)` should work, or there should be a less impactful way to remediate.\r\n\r\n#### Reproduction Steps\r\nHave not tried reproducing.\r\n\r\nBUT, On a production cluster running 1.25, I provisioned a new server, added microk8s, and `microk8s add-node` followed by join.\r\n\r\nI failed to specify the correct channel, so, the new node now running 1.26 failed to join the cluster. \r\n\r\nNot a problem, let me remove microk8s, and install the correct channel.\r\n\r\n```\r\nContacting cluster at 10.100.5.100\r\nConnection failed. 
The joining node (10.100.5.102) is already known to dqlite (504).\r\n```\r\n\r\nhttps://microk8s.io/docs/restore-quorum\r\n\r\nIs supposedly the method needed to resolve this issue, however, this is highly impactful to workloads, as the entire cluster has to be shutdown.\r\n\r\n#### Related Tickets\r\n\r\nhttps://github.com/canonical/microk8s/issues/2212\r\n\r\n\r\n#### A few more details\r\n\r\n```\r\nroot@kube01:~/Kubernetes/namespaces/readymotorsports.net# cat /var/snap/microk8s/current/var/kubernetes/backend/cluster.yaml\r\n- Address: 10.100.5.105:19001\r\n ID: 3297041220608546238\r\n Role: 0\r\n- Address: 10.100.5.100:19001\r\n ID: 6578832437747111275\r\n Role: 0\r\n- Address: 10.100.5.106:19001\r\n ID: 16212301467739305109\r\n Role: 0\r\n- Address: 10.100.5.102:19001\r\n ID: 17023170999447087085\r\n Role: 1\r\nroot@kube01:~/Kubernetes/namespaces/readymotorsports.net# microk8s remove-node 10.100.5.102\r\nError from server (NotFound): nodes \"10.100.5.102\" not found\r\nNode 10.100.5.102 does not exist in Kubernetes.\r\nroot@kube01:~/Kubernetes/namespaces/readymotorsports.net# kubectl get node -o wide\r\nNAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME\r\nkube06 Ready <none> 83d v1.25.5 10.100.5.106 <none> Ubuntu 22.04.1 LTS 5.15.0-58-generic containerd://1.6.8\r\nkube01 Ready control-plane,master 95d v1.25.5 10.100.5.100 <none> Ubuntu 22.04.1 LTS 5.15.0-58-generic containerd://1.6.8\r\nkube05 Ready <none> 108d v1.25.5 10.100.5.105 <none> Ubuntu 22.04.1 LTS 5.15.0-58-generic containerd://1.6.8\r\n```\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/python3\nimport json\nimport os\nimport shutil\nimport subprocess\nimport sys\n\nimport click\nimport netifaces\n\nfrom common.cluster.utils import (\n try_set_file_permissions,\n is_node_running_dqlite,\n)\n\nsnapdata_path = os.environ.get(\"SNAP_DATA\")\nsnap_path = os.environ.get(\"SNAP\")\ncallback_tokens_file = \"{}/credentials/callback-tokens.txt\".format(snapdata_path)\n\ncluster_dir = \"{}/var/kubernetes/backend\".format(snapdata_path)\n\n\ndef remove_dqlite_node(node, force=False):\n try:\n # Make sure this node exists\n node_info = subprocess.check_output(\n \"{}/microk8s-kubectl.wrapper get no {} -o json\".format(snap_path, node).split()\n )\n info = json.loads(node_info.decode())\n node_address = None\n for a in info[\"status\"][\"addresses\"]:\n if a[\"type\"] == \"InternalIP\":\n node_address = a[\"address\"]\n break\n\n if not node_address:\n print(\"Node {} is not part of the cluster.\".format(node))\n exit(1)\n\n node_ep = None\n my_ep, other_ep = get_dqlite_endpoints()\n for ep in other_ep:\n if ep.startswith(\"{}:\".format(node_address)):\n node_ep = ep\n\n if node_ep and force:\n delete_dqlite_node([node_ep], my_ep)\n elif node_ep and not force:\n print(\n \"Removal failed. Node {} is registered with dqlite. \"\n \"Please, run first 'microk8s leave' on the departing node. 
\\n\"\n \"If the node is not available anymore and will never attempt to join the cluster \"\n \"in the future use the '--force' flag \\n\"\n \"to unregister the node while removing it.\".format(node)\n )\n exit(1)\n\n except subprocess.CalledProcessError:\n print(\"Node {} does not exist in Kubernetes.\".format(node))\n if force:\n print(\"Attempting to remove {} from dqlite.\".format(node))\n # Make sure we do not have the node in dqlite.\n # We assume the IP is provided to denote the\n my_ep, other_ep = get_dqlite_endpoints()\n for ep in other_ep:\n if ep.startswith(\"{}:\".format(node)):\n print(\"Removing node entry found in dqlite.\")\n delete_dqlite_node([ep], my_ep)\n exit(1)\n\n remove_node(node)\n\n\ndef remove_node(node):\n try:\n # Make sure this node exists\n subprocess.check_call(\n \"{}/microk8s-kubectl.wrapper get no {}\".format(snap_path, node).split(),\n stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL,\n )\n except subprocess.CalledProcessError:\n print(\"Node {} does not exist.\".format(node))\n exit(1)\n\n remove_kubelet_token(node)\n remove_callback_token(node)\n subprocess.check_call(\n \"{}/microk8s-kubectl.wrapper delete no {}\".format(snap_path, node).split(),\n stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL,\n )\n\n\ndef remove_kubelet_token(node):\n \"\"\"\n Remove a token for a node in the known tokens\n\n :param node: the name of the node\n \"\"\"\n file = \"{}/credentials/known_tokens.csv\".format(snapdata_path)\n backup_file = \"{}.backup\".format(file)\n token = \"system:node:{}\".format(node)\n # That is a critical section. We need to protect it.\n with open(backup_file, \"w\") as back_fp:\n with open(file, \"r\") as fp:\n for _, line in enumerate(fp):\n if token in line:\n continue\n back_fp.write(\"{}\".format(line))\n\n try_set_file_permissions(backup_file)\n shutil.copyfile(backup_file, file)\n\n\ndef get_dqlite_endpoints():\n \"\"\"\n Return the endpoints the current node has on dqlite and the endpoints of the rest of the nodes.\n\n :return: two lists with the endpoints\n \"\"\"\n out = subprocess.check_output(\n \"{snappath}/bin/dqlite -s file://{dbdir}/cluster.yaml -c {dbdir}/cluster.crt \"\n \"-k {dbdir}/cluster.key -f json k8s .cluster\".format(\n snappath=snap_path, dbdir=cluster_dir\n ).split()\n )\n data = json.loads(out.decode())\n ep_addresses = []\n for ep in data:\n ep_addresses.append(ep[\"Address\"])\n local_ips = []\n for interface in netifaces.interfaces():\n if netifaces.AF_INET not in netifaces.ifaddresses(interface):\n continue\n for link in netifaces.ifaddresses(interface)[netifaces.AF_INET]:\n local_ips.append(link[\"addr\"])\n my_ep = []\n other_ep = []\n for ep in ep_addresses:\n found = False\n for ip in local_ips:\n if \"{}:\".format(ip) in ep:\n my_ep.append(ep)\n found = True\n if not found:\n other_ep.append(ep)\n\n return my_ep, other_ep\n\n\ndef delete_dqlite_node(delete_node, dqlite_ep):\n if len(delete_node) > 0 and \"127.0.0.1\" not in delete_node[0]:\n for ep in dqlite_ep:\n try:\n cmd = (\n \"{snappath}/bin/dqlite -s file://{dbdir}/cluster.yaml -c {dbdir}/cluster.crt \"\n \"-k {dbdir}/cluster.key -f json k8s\".format(\n snappath=snap_path, dbdir=cluster_dir\n ).split()\n )\n cmd.append(\".remove {}\".format(delete_node[0]))\n subprocess.check_output(cmd)\n break\n except Exception as err:\n print(\"Contacting node {} failed. 
Error:\".format(ep))\n print(repr(err))\n exit(2)\n\n\ndef remove_callback_token(node):\n \"\"\"\n Remove a callback token\n\n :param node: the node\n \"\"\"\n tmp_file = \"{}.tmp\".format(callback_tokens_file)\n if not os.path.isfile(callback_tokens_file):\n open(callback_tokens_file, \"a+\")\n os.chmod(callback_tokens_file, 0o600)\n with open(tmp_file, \"w\") as backup_fp:\n os.chmod(tmp_file, 0o600)\n with open(callback_tokens_file, \"r+\") as callback_fp:\n # Entries are of the format: 'node_hostname:agent_port token'\n # We need to get the node_hostname part\n for line in callback_fp:\n parts = line.split(\":\")\n if parts[0] == node:\n continue\n else:\n backup_fp.write(line)\n\n try_set_file_permissions(tmp_file)\n shutil.move(tmp_file, callback_tokens_file)\n\n\[email protected]()\[email protected](\"node\", required=True)\[email protected](\n \"--force\",\n is_flag=True,\n required=False,\n default=False,\n help=\"Force the node removal operation. (default: false)\",\n)\ndef reset(node, force):\n \"\"\"\n Remove a node from the cluster\n \"\"\"\n if is_node_running_dqlite():\n remove_dqlite_node(node, force)\n else:\n remove_node(node)\n sys.exit(0)\n\n\nif __name__ == \"__main__\":\n reset(prog_name=\"microk8s remove-node\")\n", "path": "scripts/wrappers/remove_node.py"}]} | 3,670 | 385 |
gh_patches_debug_7459 | rasdani/github-patches | git_diff | rlworkgroup__garage-1558 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Backport #1554
</issue>
<code>
[start of src/garage/tf/plotter/plotter.py]
1 import atexit
2 from collections import namedtuple
3 from enum import Enum
4 import platform
5 from queue import Queue
6 from threading import Thread
7
8 import numpy as np
9 import tensorflow as tf
10
11 from garage.sampler.utils import rollout as default_rollout
12
13 __all__ = ['Plotter']
14
15
16 class Op(Enum):
17 STOP = 0
18 UPDATE = 1
19 DEMO = 2
20
21
22 Message = namedtuple('Message', ['op', 'args', 'kwargs'])
23
24
25 class Plotter:
26
27 # Static variable used to disable the plotter
28 enable = True
29 # List containing all plotters instantiated in the process
30 __plotters = []
31
32 def __init__(self,
33 env,
34 policy,
35 sess=None,
36 graph=None,
37 rollout=default_rollout):
38 Plotter.__plotters.append(self)
39 self.env = env
40 self.sess = tf.compat.v1.get_default_session(
41 ) if sess is None else sess
42 self.graph = tf.compat.v1.get_default_graph(
43 ) if graph is None else graph
44 with self.sess.as_default(), self.graph.as_default():
45 self.policy = policy.clone('plotter_policy')
46 self.rollout = rollout
47 self.worker_thread = Thread(target=self._start_worker, daemon=True)
48 self.queue = Queue()
49
50 # Needed in order to draw glfw window on the main thread
51 if ('Darwin' in platform.platform()):
52 self.rollout(env,
53 policy,
54 max_path_length=np.inf,
55 animated=True,
56 speedup=5)
57
58 def _start_worker(self):
59 env = None
60 policy = None
61 max_length = None
62 initial_rollout = True
63 try:
64 with self.sess.as_default(), self.sess.graph.as_default():
65 # Each iteration will process ALL messages currently in the
66 # queue
67 while True:
68 msgs = {}
69 # If true, block and yield processor
70 if initial_rollout:
71 msg = self.queue.get()
72 msgs[msg.op] = msg
73 # Only fetch the last message of each type
74 while not self.queue.empty():
75 msg = self.queue.get()
76 msgs[msg.op] = msg
77 else:
78 # Only fetch the last message of each type
79 while not self.queue.empty():
80 msg = self.queue.get_nowait()
81 msgs[msg.op] = msg
82
83 if Op.STOP in msgs:
84 self.queue.task_done()
85 break
86 if Op.UPDATE in msgs:
87 env, policy = msgs[Op.UPDATE].args
88 self.queue.task_done()
89 if Op.DEMO in msgs:
90 param_values, max_length = msgs[Op.DEMO].args
91 policy.set_param_values(param_values)
92 initial_rollout = False
93 self.rollout(env,
94 policy,
95 max_path_length=max_length,
96 animated=True,
97 speedup=5)
98 self.queue.task_done()
99 else:
100 if max_length:
101 self.rollout(env,
102 policy,
103 max_path_length=max_length,
104 animated=True,
105 speedup=5)
106 except KeyboardInterrupt:
107 pass
108
109 def close(self):
110 if self.worker_thread.is_alive():
111 while not self.queue.empty():
112 self.queue.get()
113 self.queue.task_done()
114 self.queue.put(Message(op=Op.STOP, args=None, kwargs=None))
115 self.queue.join()
116 self.worker_thread.join()
117
118 @staticmethod
119 def disable():
120 """Disable all instances of the Plotter class."""
121 Plotter.enable = False
122
123 @staticmethod
124 def get_plotters():
125 return Plotter.__plotters
126
127 def start(self):
128 if not Plotter.enable:
129 return
130 if not self.worker_thread.is_alive():
131 tf.compat.v1.get_variable_scope().reuse_variables()
132 self.worker_thread.start()
133 self.queue.put(
134 Message(op=Op.UPDATE,
135 args=(self.env, self.policy),
136 kwargs=None))
137 atexit.register(self.close)
138
139 def update_plot(self, policy, max_length=np.inf):
140 if not Plotter.enable:
141 return
142 if self.worker_thread.is_alive():
143 self.queue.put(
144 Message(op=Op.DEMO,
145 args=(policy.get_param_values(), max_length),
146 kwargs=None))
147
[end of src/garage/tf/plotter/plotter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/garage/tf/plotter/plotter.py b/src/garage/tf/plotter/plotter.py
--- a/src/garage/tf/plotter/plotter.py
+++ b/src/garage/tf/plotter/plotter.py
@@ -43,6 +43,7 @@
) if graph is None else graph
with self.sess.as_default(), self.graph.as_default():
self.policy = policy.clone('plotter_policy')
+ self.policy.build(policy.model.input)
self.rollout = rollout
self.worker_thread = Thread(target=self._start_worker, daemon=True)
self.queue = Queue()
| {"golden_diff": "diff --git a/src/garage/tf/plotter/plotter.py b/src/garage/tf/plotter/plotter.py\n--- a/src/garage/tf/plotter/plotter.py\n+++ b/src/garage/tf/plotter/plotter.py\n@@ -43,6 +43,7 @@\n ) if graph is None else graph\n with self.sess.as_default(), self.graph.as_default():\n self.policy = policy.clone('plotter_policy')\n+ self.policy.build(policy.model.input)\n self.rollout = rollout\n self.worker_thread = Thread(target=self._start_worker, daemon=True)\n self.queue = Queue()\n", "issue": "Backport #1554\n\n", "before_files": [{"content": "import atexit\nfrom collections import namedtuple\nfrom enum import Enum\nimport platform\nfrom queue import Queue\nfrom threading import Thread\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom garage.sampler.utils import rollout as default_rollout\n\n__all__ = ['Plotter']\n\n\nclass Op(Enum):\n STOP = 0\n UPDATE = 1\n DEMO = 2\n\n\nMessage = namedtuple('Message', ['op', 'args', 'kwargs'])\n\n\nclass Plotter:\n\n # Static variable used to disable the plotter\n enable = True\n # List containing all plotters instantiated in the process\n __plotters = []\n\n def __init__(self,\n env,\n policy,\n sess=None,\n graph=None,\n rollout=default_rollout):\n Plotter.__plotters.append(self)\n self.env = env\n self.sess = tf.compat.v1.get_default_session(\n ) if sess is None else sess\n self.graph = tf.compat.v1.get_default_graph(\n ) if graph is None else graph\n with self.sess.as_default(), self.graph.as_default():\n self.policy = policy.clone('plotter_policy')\n self.rollout = rollout\n self.worker_thread = Thread(target=self._start_worker, daemon=True)\n self.queue = Queue()\n\n # Needed in order to draw glfw window on the main thread\n if ('Darwin' in platform.platform()):\n self.rollout(env,\n policy,\n max_path_length=np.inf,\n animated=True,\n speedup=5)\n\n def _start_worker(self):\n env = None\n policy = None\n max_length = None\n initial_rollout = True\n try:\n with self.sess.as_default(), self.sess.graph.as_default():\n # Each iteration will process ALL messages currently in the\n # queue\n while True:\n msgs = {}\n # If true, block and yield processor\n if initial_rollout:\n msg = self.queue.get()\n msgs[msg.op] = msg\n # Only fetch the last message of each type\n while not self.queue.empty():\n msg = self.queue.get()\n msgs[msg.op] = msg\n else:\n # Only fetch the last message of each type\n while not self.queue.empty():\n msg = self.queue.get_nowait()\n msgs[msg.op] = msg\n\n if Op.STOP in msgs:\n self.queue.task_done()\n break\n if Op.UPDATE in msgs:\n env, policy = msgs[Op.UPDATE].args\n self.queue.task_done()\n if Op.DEMO in msgs:\n param_values, max_length = msgs[Op.DEMO].args\n policy.set_param_values(param_values)\n initial_rollout = False\n self.rollout(env,\n policy,\n max_path_length=max_length,\n animated=True,\n speedup=5)\n self.queue.task_done()\n else:\n if max_length:\n self.rollout(env,\n policy,\n max_path_length=max_length,\n animated=True,\n speedup=5)\n except KeyboardInterrupt:\n pass\n\n def close(self):\n if self.worker_thread.is_alive():\n while not self.queue.empty():\n self.queue.get()\n self.queue.task_done()\n self.queue.put(Message(op=Op.STOP, args=None, kwargs=None))\n self.queue.join()\n self.worker_thread.join()\n\n @staticmethod\n def disable():\n \"\"\"Disable all instances of the Plotter class.\"\"\"\n Plotter.enable = False\n\n @staticmethod\n def get_plotters():\n return Plotter.__plotters\n\n def start(self):\n if not Plotter.enable:\n return\n if not self.worker_thread.is_alive():\n 
tf.compat.v1.get_variable_scope().reuse_variables()\n self.worker_thread.start()\n self.queue.put(\n Message(op=Op.UPDATE,\n args=(self.env, self.policy),\n kwargs=None))\n atexit.register(self.close)\n\n def update_plot(self, policy, max_length=np.inf):\n if not Plotter.enable:\n return\n if self.worker_thread.is_alive():\n self.queue.put(\n Message(op=Op.DEMO,\n args=(policy.get_param_values(), max_length),\n kwargs=None))\n", "path": "src/garage/tf/plotter/plotter.py"}]} | 1,798 | 145 |
gh_patches_debug_1848 | rasdani/github-patches | git_diff | kivy__python-for-android-1163 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Openssl recipe crashes on x86 arch
p4a branch: stable
buildozer: 0.33
bootstrap: sdl2
kivy: 1.10.0
Error message I get:
```
arm_arch.h:46:6: error: #error "unsupported ARM architecture"
```
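The error is consistent with the recipe's `select_build_arch` (see the listing below) falling through to the ARM default `linux-armv4` for any arch name that contains neither 'arm' nor 'v7a', so ARM-only headers such as `arm_arch.h` end up being compiled for an x86 build. Below is a minimal sketch of that fall-through; the `android-x86` remark in the comment is an assumption about OpenSSL 1.0.2's Configure targets, not the recipe's current behaviour.
```python
# Sketch of the arch -> OpenSSL Configure target selection and why 'x86'
# currently breaks: it falls through to the ARM-oriented 'linux-armv4'.
def select_build_arch(aname):
    if 'arm64' in aname:
        return 'linux-aarch64'
    if 'v7a' in aname:
        return 'android-armv7'
    if 'arm' in aname:
        return 'android'
    # A dedicated x86 branch (e.g. OpenSSL's 'android-x86' target) would be
    # needed here; its absence is what this issue is about.
    return 'linux-armv4'


for arch in ('armeabi-v7a', 'arm64-v8a', 'x86'):
    print(arch, '->', select_build_arch(arch))
# 'x86' -> 'linux-armv4', which pulls in ARM-specific arm_arch.h and fails.
```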
</issue>
<code>
[start of pythonforandroid/recipes/openssl/__init__.py]
1 from functools import partial
2
3 from pythonforandroid.toolchain import Recipe, shprint, current_directory
4 import sh
5
6
7 class OpenSSLRecipe(Recipe):
8 version = '1.0.2h'
9 url = 'https://www.openssl.org/source/openssl-{version}.tar.gz'
10
11 def should_build(self, arch):
12 return not self.has_libs(arch, 'libssl' + self.version + '.so',
13 'libcrypto' + self.version + '.so')
14
15 def check_symbol(self, env, sofile, symbol):
16 nm = env.get('NM', 'nm')
17 syms = sh.sh('-c', "{} -gp {} | cut -d' ' -f3".format(
18 nm, sofile), _env=env).splitlines()
19 if symbol in syms:
20 return True
21 print('{} missing symbol {}; rebuilding'.format(sofile, symbol))
22 return False
23
24 def get_recipe_env(self, arch=None):
25 env = super(OpenSSLRecipe, self).get_recipe_env(arch)
26 env['OPENSSL_VERSION'] = self.version
27 env['CFLAGS'] += ' ' + env['LDFLAGS']
28 env['CC'] += ' ' + env['LDFLAGS']
29 return env
30
31 def select_build_arch(self, arch):
32 aname = arch.arch
33 if 'arm64' in aname:
34 return 'linux-aarch64'
35 if 'v7a' in aname:
36 return 'android-armv7'
37 if 'arm' in aname:
38 return 'android'
39 return 'linux-armv4'
40
41 def build_arch(self, arch):
42 env = self.get_recipe_env(arch)
43 with current_directory(self.get_build_dir(arch.arch)):
44 # sh fails with code 255 trying to execute ./Configure
45 # so instead we manually run perl passing in Configure
46 perl = sh.Command('perl')
47 buildarch = self.select_build_arch(arch)
48 shprint(perl, 'Configure', 'shared', 'no-dso', 'no-krb5', buildarch, _env=env)
49 self.apply_patch('disable-sover.patch', arch.arch)
50 self.apply_patch('rename-shared-lib.patch', arch.arch)
51
52 # check_ssl = partial(self.check_symbol, env, 'libssl' + self.version + '.so')
53 check_crypto = partial(self.check_symbol, env, 'libcrypto' + self.version + '.so')
54 while True:
55 shprint(sh.make, 'build_libs', _env=env)
56 if all(map(check_crypto, ('SSLeay', 'MD5_Transform', 'MD4_Init'))):
57 break
58 shprint(sh.make, 'clean', _env=env)
59
60 self.install_libs(arch, 'libssl' + self.version + '.so',
61 'libcrypto' + self.version + '.so')
62
63 recipe = OpenSSLRecipe()
64
[end of pythonforandroid/recipes/openssl/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pythonforandroid/recipes/openssl/__init__.py b/pythonforandroid/recipes/openssl/__init__.py
--- a/pythonforandroid/recipes/openssl/__init__.py
+++ b/pythonforandroid/recipes/openssl/__init__.py
@@ -36,6 +36,8 @@
return 'android-armv7'
if 'arm' in aname:
return 'android'
+ if 'x86' in aname:
+ return 'android-x86'
return 'linux-armv4'
def build_arch(self, arch):
| {"golden_diff": "diff --git a/pythonforandroid/recipes/openssl/__init__.py b/pythonforandroid/recipes/openssl/__init__.py\n--- a/pythonforandroid/recipes/openssl/__init__.py\n+++ b/pythonforandroid/recipes/openssl/__init__.py\n@@ -36,6 +36,8 @@\n return 'android-armv7'\n if 'arm' in aname:\n return 'android'\n+ if 'x86' in aname:\n+ return 'android-x86'\n return 'linux-armv4'\n \n def build_arch(self, arch):\n", "issue": "Openssl recipe crashes on x86 arch\np4a branch: stable\r\nbuildozer: 0.33\r\nbootstrap: sdl2\r\nkivy: 1.10.0\r\n\r\nError message i get:\r\n```\r\narm_arch.h:46:6: error: #error \"unsupported ARM architecture\"\r\n```\n", "before_files": [{"content": "from functools import partial\n\nfrom pythonforandroid.toolchain import Recipe, shprint, current_directory\nimport sh\n\n\nclass OpenSSLRecipe(Recipe):\n version = '1.0.2h'\n url = 'https://www.openssl.org/source/openssl-{version}.tar.gz'\n\n def should_build(self, arch):\n return not self.has_libs(arch, 'libssl' + self.version + '.so',\n 'libcrypto' + self.version + '.so')\n\n def check_symbol(self, env, sofile, symbol):\n nm = env.get('NM', 'nm')\n syms = sh.sh('-c', \"{} -gp {} | cut -d' ' -f3\".format(\n nm, sofile), _env=env).splitlines()\n if symbol in syms:\n return True\n print('{} missing symbol {}; rebuilding'.format(sofile, symbol))\n return False\n\n def get_recipe_env(self, arch=None):\n env = super(OpenSSLRecipe, self).get_recipe_env(arch)\n env['OPENSSL_VERSION'] = self.version\n env['CFLAGS'] += ' ' + env['LDFLAGS']\n env['CC'] += ' ' + env['LDFLAGS']\n return env\n\n def select_build_arch(self, arch):\n aname = arch.arch\n if 'arm64' in aname:\n return 'linux-aarch64'\n if 'v7a' in aname:\n return 'android-armv7'\n if 'arm' in aname:\n return 'android'\n return 'linux-armv4'\n\n def build_arch(self, arch):\n env = self.get_recipe_env(arch)\n with current_directory(self.get_build_dir(arch.arch)):\n # sh fails with code 255 trying to execute ./Configure\n # so instead we manually run perl passing in Configure\n perl = sh.Command('perl')\n buildarch = self.select_build_arch(arch)\n shprint(perl, 'Configure', 'shared', 'no-dso', 'no-krb5', buildarch, _env=env)\n self.apply_patch('disable-sover.patch', arch.arch)\n self.apply_patch('rename-shared-lib.patch', arch.arch)\n\n # check_ssl = partial(self.check_symbol, env, 'libssl' + self.version + '.so')\n check_crypto = partial(self.check_symbol, env, 'libcrypto' + self.version + '.so')\n while True:\n shprint(sh.make, 'build_libs', _env=env)\n if all(map(check_crypto, ('SSLeay', 'MD5_Transform', 'MD4_Init'))):\n break\n shprint(sh.make, 'clean', _env=env)\n\n self.install_libs(arch, 'libssl' + self.version + '.so',\n 'libcrypto' + self.version + '.so')\n\nrecipe = OpenSSLRecipe()\n", "path": "pythonforandroid/recipes/openssl/__init__.py"}]} | 1,368 | 128 |
gh_patches_debug_34210 | rasdani/github-patches | git_diff | matrix-org__synapse-7663 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Crash on startup (1.14.0)
### Description
Restarting synapse runs the risk of crashing.
I run a daily stop-backup-restart routine, but since upgrading to 1.14.0, there have been infrequent occurrences of crashes after attempting to restart ([logs attached in gist](https://gist.github.com/karthanistyr/749a6f35fd8e13e15fc9571fd304dfe4)).
### Steps to reproduce
- stop the server (e.g. stop the docker compose stack, in my case)
- wait for some time (my backup routine lasts for 45 mins)
- restart server (restart the docker compose stack)
It's worth noting that using docker-compose, the containers themselves are scrapped when stopped and new ones are spun up on restart, reattaching volumes containing persistent data.
It had been running like this for more than two years (dating back to the 0.27.x series), and this is the first time something like this has happened to my server.
It looks like some form of race condition, as it is infrequent and I can usually restart the container the same way the daily backup script does, and it then starts up fine.
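For readers unfamiliar with this class of crash, the standalone snippet below illustrates how a dict that one thread populates while another thread iterates it can fail only some of the time, which would match the intermittent restarts described above. It is not Synapse code; the names merely mirror those in `synapse/config/cache.py` shown further down.
```python
# Standalone illustration of the suspected failure mode: a shared dict is
# mutated by one thread while another iterates it, raising only sometimes.
import threading
import time

CACHES = {}


def register_caches():
    for i in range(100000):
        CACHES["cache_%d" % i] = lambda factor: None


def resize_all_caches():
    try:
        for name, callback in CACHES.items():
            callback(0.5)
    except RuntimeError as exc:  # "dictionary changed size during iteration"
        print("crashed:", exc)


t = threading.Thread(target=register_caches)
t.start()
time.sleep(0.001)
resize_all_caches()
t.join()
```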
### Version information
- **Homeserver**: draak.fr
- **Version**: 1.14.0
- **Install method**: Docker image
- **Platform**: debian linux 10.3
- **Hardware**: small 4-core 8 GB VPS
</issue>
<code>
[start of synapse/config/cache.py]
1 # -*- coding: utf-8 -*-
2 # Copyright 2019 Matrix.org Foundation C.I.C.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 import os
17 import re
18 from typing import Callable, Dict
19
20 from ._base import Config, ConfigError
21
22 # The prefix for all cache factor-related environment variables
23 _CACHE_PREFIX = "SYNAPSE_CACHE_FACTOR"
24
25 # Map from canonicalised cache name to cache.
26 _CACHES = {}
27
28 _DEFAULT_FACTOR_SIZE = 0.5
29 _DEFAULT_EVENT_CACHE_SIZE = "10K"
30
31
32 class CacheProperties(object):
33 def __init__(self):
34 # The default factor size for all caches
35 self.default_factor_size = float(
36 os.environ.get(_CACHE_PREFIX, _DEFAULT_FACTOR_SIZE)
37 )
38 self.resize_all_caches_func = None
39
40
41 properties = CacheProperties()
42
43
44 def _canonicalise_cache_name(cache_name: str) -> str:
45 """Gets the canonical form of the cache name.
46
47 Since we specify cache names in config and environment variables we need to
48 ignore case and special characters. For example, some caches have asterisks
49 in their name to denote that they're not attached to a particular database
50 function, and these asterisks need to be stripped out
51 """
52
53 cache_name = re.sub(r"[^A-Za-z_1-9]", "", cache_name)
54
55 return cache_name.lower()
56
57
58 def add_resizable_cache(cache_name: str, cache_resize_callback: Callable):
59 """Register a cache that's size can dynamically change
60
61 Args:
62 cache_name: A reference to the cache
63 cache_resize_callback: A callback function that will be ran whenever
64 the cache needs to be resized
65 """
66 # Some caches have '*' in them which we strip out.
67 cache_name = _canonicalise_cache_name(cache_name)
68
69 _CACHES[cache_name] = cache_resize_callback
70
71 # Ensure all loaded caches are sized appropriately
72 #
73 # This method should only run once the config has been read,
74 # as it uses values read from it
75 if properties.resize_all_caches_func:
76 properties.resize_all_caches_func()
77
78
79 class CacheConfig(Config):
80 section = "caches"
81 _environ = os.environ
82
83 @staticmethod
84 def reset():
85 """Resets the caches to their defaults. Used for tests."""
86 properties.default_factor_size = float(
87 os.environ.get(_CACHE_PREFIX, _DEFAULT_FACTOR_SIZE)
88 )
89 properties.resize_all_caches_func = None
90 _CACHES.clear()
91
92 def generate_config_section(self, **kwargs):
93 return """\
94 ## Caching ##
95
96 # Caching can be configured through the following options.
97 #
98 # A cache 'factor' is a multiplier that can be applied to each of
99 # Synapse's caches in order to increase or decrease the maximum
100 # number of entries that can be stored.
101
102 # The number of events to cache in memory. Not affected by
103 # caches.global_factor.
104 #
105 #event_cache_size: 10K
106
107 caches:
108 # Controls the global cache factor, which is the default cache factor
109 # for all caches if a specific factor for that cache is not otherwise
110 # set.
111 #
112 # This can also be set by the "SYNAPSE_CACHE_FACTOR" environment
113 # variable. Setting by environment variable takes priority over
114 # setting through the config file.
115 #
116 # Defaults to 0.5, which will half the size of all caches.
117 #
118 #global_factor: 1.0
119
120 # A dictionary of cache name to cache factor for that individual
121 # cache. Overrides the global cache factor for a given cache.
122 #
123 # These can also be set through environment variables comprised
124 # of "SYNAPSE_CACHE_FACTOR_" + the name of the cache in capital
125 # letters and underscores. Setting by environment variable
126 # takes priority over setting through the config file.
127 # Ex. SYNAPSE_CACHE_FACTOR_GET_USERS_WHO_SHARE_ROOM_WITH_USER=2.0
128 #
129 # Some caches have '*' and other characters that are not
130 # alphanumeric or underscores. These caches can be named with or
131 # without the special characters stripped. For example, to specify
132 # the cache factor for `*stateGroupCache*` via an environment
133 # variable would be `SYNAPSE_CACHE_FACTOR_STATEGROUPCACHE=2.0`.
134 #
135 per_cache_factors:
136 #get_users_who_share_room_with_user: 2.0
137 """
138
139 def read_config(self, config, **kwargs):
140 self.event_cache_size = self.parse_size(
141 config.get("event_cache_size", _DEFAULT_EVENT_CACHE_SIZE)
142 )
143 self.cache_factors = {} # type: Dict[str, float]
144
145 cache_config = config.get("caches") or {}
146 self.global_factor = cache_config.get(
147 "global_factor", properties.default_factor_size
148 )
149 if not isinstance(self.global_factor, (int, float)):
150 raise ConfigError("caches.global_factor must be a number.")
151
152 # Set the global one so that it's reflected in new caches
153 properties.default_factor_size = self.global_factor
154
155 # Load cache factors from the config
156 individual_factors = cache_config.get("per_cache_factors") or {}
157 if not isinstance(individual_factors, dict):
158 raise ConfigError("caches.per_cache_factors must be a dictionary")
159
160 # Canonicalise the cache names *before* updating with the environment
161 # variables.
162 individual_factors = {
163 _canonicalise_cache_name(key): val
164 for key, val in individual_factors.items()
165 }
166
167 # Override factors from environment if necessary
168 individual_factors.update(
169 {
170 _canonicalise_cache_name(key[len(_CACHE_PREFIX) + 1 :]): float(val)
171 for key, val in self._environ.items()
172 if key.startswith(_CACHE_PREFIX + "_")
173 }
174 )
175
176 for cache, factor in individual_factors.items():
177 if not isinstance(factor, (int, float)):
178 raise ConfigError(
179 "caches.per_cache_factors.%s must be a number" % (cache,)
180 )
181 self.cache_factors[cache] = factor
182
183 # Resize all caches (if necessary) with the new factors we've loaded
184 self.resize_all_caches()
185
186 # Store this function so that it can be called from other classes without
187 # needing an instance of Config
188 properties.resize_all_caches_func = self.resize_all_caches
189
190 def resize_all_caches(self):
191 """Ensure all cache sizes are up to date
192
193 For each cache, run the mapped callback function with either
194 a specific cache factor or the default, global one.
195 """
196 for cache_name, callback in _CACHES.items():
197 new_factor = self.cache_factors.get(cache_name, self.global_factor)
198 callback(new_factor)
199
[end of synapse/config/cache.py]
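As a rough usage sketch of the registration flow described in the docstrings above: a cache hands `add_resizable_cache` a callback, and `CacheConfig.resize_all_caches` later invokes it with either the per-cache or the global factor. This is illustrative only; `ToyCache` and its sizing are made up and do not reflect how Synapse's real cache classes wire this up.
```python
# Hypothetical consumer of the config module above (illustrative only).
from synapse.config.cache import add_resizable_cache


class ToyCache:
    def __init__(self, name, base_size=100):
        self.base_size = base_size
        self.max_size = base_size
        # Register so resize_all_caches() can adjust us when config is read.
        add_resizable_cache(name, self.set_cache_factor)

    def set_cache_factor(self, factor):
        # Called with the per-cache factor if configured, else the global one.
        self.max_size = int(self.base_size * factor)


cache = ToyCache("get_users_who_share_room_with_user")
```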
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/synapse/config/cache.py b/synapse/config/cache.py
--- a/synapse/config/cache.py
+++ b/synapse/config/cache.py
@@ -15,6 +15,7 @@
import os
import re
+import threading
from typing import Callable, Dict
from ._base import Config, ConfigError
@@ -25,6 +26,9 @@
# Map from canonicalised cache name to cache.
_CACHES = {}
+# a lock on the contents of _CACHES
+_CACHES_LOCK = threading.Lock()
+
_DEFAULT_FACTOR_SIZE = 0.5
_DEFAULT_EVENT_CACHE_SIZE = "10K"
@@ -66,7 +70,10 @@
# Some caches have '*' in them which we strip out.
cache_name = _canonicalise_cache_name(cache_name)
- _CACHES[cache_name] = cache_resize_callback
+ # sometimes caches are initialised from background threads, so we need to make
+ # sure we don't conflict with another thread running a resize operation
+ with _CACHES_LOCK:
+ _CACHES[cache_name] = cache_resize_callback
# Ensure all loaded caches are sized appropriately
#
@@ -87,7 +94,8 @@
os.environ.get(_CACHE_PREFIX, _DEFAULT_FACTOR_SIZE)
)
properties.resize_all_caches_func = None
- _CACHES.clear()
+ with _CACHES_LOCK:
+ _CACHES.clear()
def generate_config_section(self, **kwargs):
return """\
@@ -193,6 +201,8 @@
For each cache, run the mapped callback function with either
a specific cache factor or the default, global one.
"""
- for cache_name, callback in _CACHES.items():
- new_factor = self.cache_factors.get(cache_name, self.global_factor)
- callback(new_factor)
+ # block other threads from modifying _CACHES while we iterate it.
+ with _CACHES_LOCK:
+ for cache_name, callback in _CACHES.items():
+ new_factor = self.cache_factors.get(cache_name, self.global_factor)
+ callback(new_factor)
| {"golden_diff": "diff --git a/synapse/config/cache.py b/synapse/config/cache.py\n--- a/synapse/config/cache.py\n+++ b/synapse/config/cache.py\n@@ -15,6 +15,7 @@\n \n import os\n import re\n+import threading\n from typing import Callable, Dict\n \n from ._base import Config, ConfigError\n@@ -25,6 +26,9 @@\n # Map from canonicalised cache name to cache.\n _CACHES = {}\n \n+# a lock on the contents of _CACHES\n+_CACHES_LOCK = threading.Lock()\n+\n _DEFAULT_FACTOR_SIZE = 0.5\n _DEFAULT_EVENT_CACHE_SIZE = \"10K\"\n \n@@ -66,7 +70,10 @@\n # Some caches have '*' in them which we strip out.\n cache_name = _canonicalise_cache_name(cache_name)\n \n- _CACHES[cache_name] = cache_resize_callback\n+ # sometimes caches are initialised from background threads, so we need to make\n+ # sure we don't conflict with another thread running a resize operation\n+ with _CACHES_LOCK:\n+ _CACHES[cache_name] = cache_resize_callback\n \n # Ensure all loaded caches are sized appropriately\n #\n@@ -87,7 +94,8 @@\n os.environ.get(_CACHE_PREFIX, _DEFAULT_FACTOR_SIZE)\n )\n properties.resize_all_caches_func = None\n- _CACHES.clear()\n+ with _CACHES_LOCK:\n+ _CACHES.clear()\n \n def generate_config_section(self, **kwargs):\n return \"\"\"\\\n@@ -193,6 +201,8 @@\n For each cache, run the mapped callback function with either\n a specific cache factor or the default, global one.\n \"\"\"\n- for cache_name, callback in _CACHES.items():\n- new_factor = self.cache_factors.get(cache_name, self.global_factor)\n- callback(new_factor)\n+ # block other threads from modifying _CACHES while we iterate it.\n+ with _CACHES_LOCK:\n+ for cache_name, callback in _CACHES.items():\n+ new_factor = self.cache_factors.get(cache_name, self.global_factor)\n+ callback(new_factor)\n", "issue": "Crash on startup (1.14.0)\n### Description\r\nRestarting synapse runs the risk of crashing.\r\n\r\nI run a daily stop-backup-restart routine, but since upgrading to 1.14.0, there have been infrequent occurrences of crashes after attempting to restart ([logs attached in gist](https://gist.github.com/karthanistyr/749a6f35fd8e13e15fc9571fd304dfe4)).\r\n\r\n### Steps to reproduce\r\n\r\n- stop the server (e.g. 
stop the docker compose stack, in my case)\r\n- wait for some time (my backup routine lasts for 45 mins)\r\n- restart server (restart the docker compose stack)\r\n\r\nIt's worth noting that using docker-compose, the containers themselves are scrapped when stopped and new ones are spun up on restart, reattaching volumes containing persistent data.\r\n\r\nIt had been running like this for more than two years (dating back to the 0.27.x series), and it's the first time something like this happens to my server.\r\n\r\nIt looks like some form of race condition as it is infrequent and I can usually restart the container as would the daily backup script and it starts up fine.\r\n\r\n### Version information\r\n- **Homeserver**: draak.fr\r\n- **Version**: 1.14.0\r\n- **Install method**: Docker image\r\n- **Platform**: debian linux 10.3\r\n- **Hardware**: small 4-core 8 GB VPS\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2019 Matrix.org Foundation C.I.C.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport re\nfrom typing import Callable, Dict\n\nfrom ._base import Config, ConfigError\n\n# The prefix for all cache factor-related environment variables\n_CACHE_PREFIX = \"SYNAPSE_CACHE_FACTOR\"\n\n# Map from canonicalised cache name to cache.\n_CACHES = {}\n\n_DEFAULT_FACTOR_SIZE = 0.5\n_DEFAULT_EVENT_CACHE_SIZE = \"10K\"\n\n\nclass CacheProperties(object):\n def __init__(self):\n # The default factor size for all caches\n self.default_factor_size = float(\n os.environ.get(_CACHE_PREFIX, _DEFAULT_FACTOR_SIZE)\n )\n self.resize_all_caches_func = None\n\n\nproperties = CacheProperties()\n\n\ndef _canonicalise_cache_name(cache_name: str) -> str:\n \"\"\"Gets the canonical form of the cache name.\n\n Since we specify cache names in config and environment variables we need to\n ignore case and special characters. For example, some caches have asterisks\n in their name to denote that they're not attached to a particular database\n function, and these asterisks need to be stripped out\n \"\"\"\n\n cache_name = re.sub(r\"[^A-Za-z_1-9]\", \"\", cache_name)\n\n return cache_name.lower()\n\n\ndef add_resizable_cache(cache_name: str, cache_resize_callback: Callable):\n \"\"\"Register a cache that's size can dynamically change\n\n Args:\n cache_name: A reference to the cache\n cache_resize_callback: A callback function that will be ran whenever\n the cache needs to be resized\n \"\"\"\n # Some caches have '*' in them which we strip out.\n cache_name = _canonicalise_cache_name(cache_name)\n\n _CACHES[cache_name] = cache_resize_callback\n\n # Ensure all loaded caches are sized appropriately\n #\n # This method should only run once the config has been read,\n # as it uses values read from it\n if properties.resize_all_caches_func:\n properties.resize_all_caches_func()\n\n\nclass CacheConfig(Config):\n section = \"caches\"\n _environ = os.environ\n\n @staticmethod\n def reset():\n \"\"\"Resets the caches to their defaults. 
Used for tests.\"\"\"\n properties.default_factor_size = float(\n os.environ.get(_CACHE_PREFIX, _DEFAULT_FACTOR_SIZE)\n )\n properties.resize_all_caches_func = None\n _CACHES.clear()\n\n def generate_config_section(self, **kwargs):\n return \"\"\"\\\n ## Caching ##\n\n # Caching can be configured through the following options.\n #\n # A cache 'factor' is a multiplier that can be applied to each of\n # Synapse's caches in order to increase or decrease the maximum\n # number of entries that can be stored.\n\n # The number of events to cache in memory. Not affected by\n # caches.global_factor.\n #\n #event_cache_size: 10K\n\n caches:\n # Controls the global cache factor, which is the default cache factor\n # for all caches if a specific factor for that cache is not otherwise\n # set.\n #\n # This can also be set by the \"SYNAPSE_CACHE_FACTOR\" environment\n # variable. Setting by environment variable takes priority over\n # setting through the config file.\n #\n # Defaults to 0.5, which will half the size of all caches.\n #\n #global_factor: 1.0\n\n # A dictionary of cache name to cache factor for that individual\n # cache. Overrides the global cache factor for a given cache.\n #\n # These can also be set through environment variables comprised\n # of \"SYNAPSE_CACHE_FACTOR_\" + the name of the cache in capital\n # letters and underscores. Setting by environment variable\n # takes priority over setting through the config file.\n # Ex. SYNAPSE_CACHE_FACTOR_GET_USERS_WHO_SHARE_ROOM_WITH_USER=2.0\n #\n # Some caches have '*' and other characters that are not\n # alphanumeric or underscores. These caches can be named with or\n # without the special characters stripped. For example, to specify\n # the cache factor for `*stateGroupCache*` via an environment\n # variable would be `SYNAPSE_CACHE_FACTOR_STATEGROUPCACHE=2.0`.\n #\n per_cache_factors:\n #get_users_who_share_room_with_user: 2.0\n \"\"\"\n\n def read_config(self, config, **kwargs):\n self.event_cache_size = self.parse_size(\n config.get(\"event_cache_size\", _DEFAULT_EVENT_CACHE_SIZE)\n )\n self.cache_factors = {} # type: Dict[str, float]\n\n cache_config = config.get(\"caches\") or {}\n self.global_factor = cache_config.get(\n \"global_factor\", properties.default_factor_size\n )\n if not isinstance(self.global_factor, (int, float)):\n raise ConfigError(\"caches.global_factor must be a number.\")\n\n # Set the global one so that it's reflected in new caches\n properties.default_factor_size = self.global_factor\n\n # Load cache factors from the config\n individual_factors = cache_config.get(\"per_cache_factors\") or {}\n if not isinstance(individual_factors, dict):\n raise ConfigError(\"caches.per_cache_factors must be a dictionary\")\n\n # Canonicalise the cache names *before* updating with the environment\n # variables.\n individual_factors = {\n _canonicalise_cache_name(key): val\n for key, val in individual_factors.items()\n }\n\n # Override factors from environment if necessary\n individual_factors.update(\n {\n _canonicalise_cache_name(key[len(_CACHE_PREFIX) + 1 :]): float(val)\n for key, val in self._environ.items()\n if key.startswith(_CACHE_PREFIX + \"_\")\n }\n )\n\n for cache, factor in individual_factors.items():\n if not isinstance(factor, (int, float)):\n raise ConfigError(\n \"caches.per_cache_factors.%s must be a number\" % (cache,)\n )\n self.cache_factors[cache] = factor\n\n # Resize all caches (if necessary) with the new factors we've loaded\n self.resize_all_caches()\n\n # Store this function so that it can be called 
from other classes without\n # needing an instance of Config\n properties.resize_all_caches_func = self.resize_all_caches\n\n def resize_all_caches(self):\n \"\"\"Ensure all cache sizes are up to date\n\n For each cache, run the mapped callback function with either\n a specific cache factor or the default, global one.\n \"\"\"\n for cache_name, callback in _CACHES.items():\n new_factor = self.cache_factors.get(cache_name, self.global_factor)\n callback(new_factor)\n", "path": "synapse/config/cache.py"}]} | 2,942 | 487 |
gh_patches_debug_41566 | rasdani/github-patches | git_diff | vaexio__vaex-312 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
vaex.groupby type casting
In the current implementation of `groupby`, if a column is of type `int`, calculating the mean will also be of type `int`, which is numerically not accurate. This should probably be cast to `float` somewhere behind the scenes.
</issue>
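A minimal sketch of the precision loss being described (editorial illustration, not part of the original report; it uses plain NumPy rather than vaex internals to show what happens when a mean is forced back into the column's integer dtype):

```python
import numpy as np

# an integer column and a grouped mean computed as sum / count
values = np.array([1, 2, 2], dtype=np.int64)
total, count = values.sum(), len(values)

mean = total / count                 # 1.666... as float64
as_int = mean.astype(values.dtype)   # 1 -- truncated when cast back to int64
print(mean, as_int)
```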
<code>
[start of packages/vaex-core/vaex/agg.py]
1 import numpy as np
2
3 from .stat import _Statistic
4 import vaex.superagg
5
6 aggregates = {}
7
8
9 def register(f, name=None):
10 name = name or f.__name__
11 aggregates[name] = f
12 return f
13
14
15 class AggregatorDescriptor(object):
16 def __repr__(self):
17 return 'vaex.agg.{}({!r})'.format(self.short_name, str(self.expression))
18
19 def finish(self, value):
20 return value
21
22
23 class AggregatorDescriptorBasic(AggregatorDescriptor):
24 def __init__(self, name, expression, short_name, multi_args=False, agg_args=[]):
25 self.name = name
26 self.short_name = short_name
27 self.expression = expression
28 self.agg_args = agg_args
29 if not multi_args:
30 if self.expression == '*':
31 self.expressions = []
32 else:
33 self.expressions = [self.expression]
34 else:
35 self.expressions = expression
36
37 def pretty_name(self, id=None):
38 id = id or "_".join(map(str, self.expression))
39 return '{0}_{1}'.format(id, self.short_name)
40
41 def add_operations(self, agg_task, edges=True, **kwargs):
42 value = agg_task.add_aggregation_operation(self, edges=edges, **kwargs)
43 @vaex.delayed
44 def finish(value):
45 return self.finish(value)
46 return finish(value)
47
48 def _create_operation(self, df, grid):
49 if self.expression == '*':
50 self.dtype_in = np.dtype('int64')
51 self.dtype_out = np.dtype('int64')
52 else:
53 self.dtype_in = df[str(self.expressions[0])].dtype
54 self.dtype_out = self.dtype_in
55 if self.short_name == "count":
56 self.dtype_out = np.dtype('int64')
57 agg_op_type = vaex.utils.find_type_from_dtype(vaex.superagg, self.name + "_", self.dtype_in)
58 agg_op = agg_op_type(grid, *self.agg_args)
59 return agg_op
60
61
62 class AggregatorDescriptorMulti(AggregatorDescriptor):
63 """Uses multiple operations/aggregation to calculate the final aggretation"""
64 def __init__(self, name, expression, short_name):
65 self.name = name
66 self.short_name = short_name
67 self.expression = expression
68 self.expressions = [self.expression]
69 self._add_sub_agg()
70
71 def _add_sub_agg(self):
72 pass
73
74 def pretty_name(self, id=None):
75 id = id or "_".join(map(str, self.expression))
76 return '{0}_{1}'.format(id, self.short_name)
77
78
79 class AggregatorDescriptorMean(AggregatorDescriptorMulti):
80 def __init__(self, name, expression, short_name="mean"):
81 super(AggregatorDescriptorMean, self).__init__(name, expression, short_name)
82
83 def _add_sub_agg(self):
84 self.sum = sum(self.expression)
85 self.count = count(self.expression)
86
87 def add_operations(self, agg_task, **kwargs):
88 task_sum = self.sum.add_operations(agg_task, **kwargs)
89 task_count = self.count.add_operations(agg_task, **kwargs)
90 self.dtype_in = self.sum.dtype_in
91 self.dtype_out = self.sum.dtype_out
92 @vaex.delayed
93 def finish(sum, count):
94 dtype = sum.dtype
95 if sum.dtype.kind == 'M':
96 sum = sum.view('uint64')
97 count = count.view('uint64')
98 with np.errstate(divide='ignore', invalid='ignore'):
99 mean = sum / count
100 if dtype.kind != mean.dtype.kind:
101 # TODO: not sure why view does not work
102 mean = mean.astype(dtype)
103 return mean
104 return finish(task_sum, task_count)
105
106
107 class AggregatorDescriptorVar(AggregatorDescriptorMulti):
108 def __init__(self, name, expression, short_name="var", ddof=0):
109 super(AggregatorDescriptorVar, self).__init__(name, expression, short_name)
110 self.ddof = ddof
111
112 def add_operations(self, agg_task, **kwargs):
113 expression_sum = expression = agg_task.df[str(self.expression)]
114 expression = expression_sum = expression.astype('float64')
115 sum_moment = _sum_moment(str(expression_sum), 2)
116 sum_ = sum(str(expression_sum))
117 count_ = count(str(expression))
118
119 task_sum_moment = sum_moment.add_operations(agg_task, **kwargs)
120 task_sum = sum_.add_operations(agg_task, **kwargs)
121 task_count = count_.add_operations(agg_task, **kwargs)
122 self.dtype_in = sum_.dtype_in
123 self.dtype_out = sum_.dtype_out
124 @vaex.delayed
125 def finish(sum_moment, sum, count):
126 # print(self.sum, sum, task_sum)
127 dtype = sum.dtype
128 if sum.dtype.kind == 'M':
129 sum = sum.view('uint64')
130 sum_moment = sum_moment.view('uint64')
131 count = count.view('uint64')
132 with np.errstate(divide='ignore', invalid='ignore'):
133 mean = sum / count
134 print(sum, sum_moment)
135 raw_moments2 = sum_moment/count
136 variance = (raw_moments2 - mean**2) #* count/(count-self.ddof)
137 if dtype.kind != mean.dtype.kind:
138 # TODO: not sure why view does not work
139 variance = variance.astype(dtype)
140 return self.finish(variance)
141 return finish(task_sum_moment, task_sum, task_count)
142
143
144 class AggregatorDescriptorStd(AggregatorDescriptorVar):
145 def finish(self, value):
146 return value**0.5
147
148 @register
149 def count(expression='*'):
150 '''Creates a count aggregation'''
151 return AggregatorDescriptorBasic('AggCount', expression, 'count')
152
153 @register
154 def sum(expression):
155 '''Creates a sum aggregation'''
156 return AggregatorDescriptorBasic('AggSum', expression, 'sum')
157
158 @register
159 def mean(expression):
160 '''Creates a mean aggregation'''
161 return AggregatorDescriptorMean('mean', expression, 'mean')
162
163 @register
164 def min(expression):
165 '''Creates a min aggregation'''
166 return AggregatorDescriptorBasic('AggMin', expression, 'min')
167
168 @register
169 def _sum_moment(expression, moment):
170 '''Creates a sum of moment aggregator'''
171 return AggregatorDescriptorBasic('AggSumMoment', expression, 'summoment', agg_args=[moment])
172
173 @register
174 def max(expression):
175 '''Creates a max aggregation'''
176 return AggregatorDescriptorBasic('AggMax', expression, 'max')
177
178 @register
179 def first(expression, order_expression):
180 '''Creates a max aggregation'''
181 return AggregatorDescriptorBasic('AggFirst', [expression, order_expression], 'first', multi_args=True)
182
183 @register
184 def std(expression, ddof=0):
185 '''Creates a standard deviation aggregation'''
186 return AggregatorDescriptorStd('std', expression, 'std', ddof=ddof)
187
188 @register
189 def var(expression, ddof=0):
190 '''Creates a variance aggregation'''
191 return AggregatorDescriptorVar('var', expression, 'var', ddof=ddof)
192
193 # @register
194 # def covar(x, y):
195 # '''Creates a standard deviation aggregation'''
196 # return _Statistic('covar', x, y)
197
198 # @register
199 # def correlation(x, y):
200 # '''Creates a standard deviation aggregation'''
201 # return _Statistic('correlation', x, y)
202
203
[end of packages/vaex-core/vaex/agg.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/packages/vaex-core/vaex/agg.py b/packages/vaex-core/vaex/agg.py
--- a/packages/vaex-core/vaex/agg.py
+++ b/packages/vaex-core/vaex/agg.py
@@ -24,7 +24,7 @@
def __init__(self, name, expression, short_name, multi_args=False, agg_args=[]):
self.name = name
self.short_name = short_name
- self.expression = expression
+ self.expression = str(expression)
self.agg_args = agg_args
if not multi_args:
if self.expression == '*':
@@ -66,10 +66,6 @@
self.short_name = short_name
self.expression = expression
self.expressions = [self.expression]
- self._add_sub_agg()
-
- def _add_sub_agg(self):
- pass
def pretty_name(self, id=None):
id = id or "_".join(map(str, self.expression))
@@ -80,15 +76,20 @@
def __init__(self, name, expression, short_name="mean"):
super(AggregatorDescriptorMean, self).__init__(name, expression, short_name)
- def _add_sub_agg(self):
- self.sum = sum(self.expression)
- self.count = count(self.expression)
-
def add_operations(self, agg_task, **kwargs):
- task_sum = self.sum.add_operations(agg_task, **kwargs)
- task_count = self.count.add_operations(agg_task, **kwargs)
- self.dtype_in = self.sum.dtype_in
- self.dtype_out = self.sum.dtype_out
+ expression = expression_sum = expression = agg_task.df[str(self.expression)]
+ # ints, floats and bools are upcasted
+ if expression_sum.dtype.kind in "buif":
+ expression = expression_sum = expression_sum.astype('float64')
+
+ sum_agg = sum(expression_sum)
+ count_agg = count(expression)
+
+ task_sum = sum_agg.add_operations(agg_task, **kwargs)
+ task_count = count_agg.add_operations(agg_task, **kwargs)
+ self.dtype_in = sum_agg.dtype_in
+ self.dtype_out = sum_agg.dtype_out
+
@vaex.delayed
def finish(sum, count):
dtype = sum.dtype
@@ -101,6 +102,7 @@
# TODO: not sure why view does not work
mean = mean.astype(dtype)
return mean
+
return finish(task_sum, task_count)
@@ -123,7 +125,6 @@
self.dtype_out = sum_.dtype_out
@vaex.delayed
def finish(sum_moment, sum, count):
- # print(self.sum, sum, task_sum)
dtype = sum.dtype
if sum.dtype.kind == 'M':
sum = sum.view('uint64')
@@ -131,7 +132,6 @@
count = count.view('uint64')
with np.errstate(divide='ignore', invalid='ignore'):
mean = sum / count
- print(sum, sum_moment)
raw_moments2 = sum_moment/count
variance = (raw_moments2 - mean**2) #* count/(count-self.ddof)
if dtype.kind != mean.dtype.kind:
| {"golden_diff": "diff --git a/packages/vaex-core/vaex/agg.py b/packages/vaex-core/vaex/agg.py\n--- a/packages/vaex-core/vaex/agg.py\n+++ b/packages/vaex-core/vaex/agg.py\n@@ -24,7 +24,7 @@\n def __init__(self, name, expression, short_name, multi_args=False, agg_args=[]):\n self.name = name\n self.short_name = short_name\n- self.expression = expression\n+ self.expression = str(expression)\n self.agg_args = agg_args\n if not multi_args:\n if self.expression == '*':\n@@ -66,10 +66,6 @@\n self.short_name = short_name\n self.expression = expression\n self.expressions = [self.expression]\n- self._add_sub_agg()\n-\n- def _add_sub_agg(self):\n- pass\n \n def pretty_name(self, id=None):\n id = id or \"_\".join(map(str, self.expression))\n@@ -80,15 +76,20 @@\n def __init__(self, name, expression, short_name=\"mean\"):\n super(AggregatorDescriptorMean, self).__init__(name, expression, short_name)\n \n- def _add_sub_agg(self):\n- self.sum = sum(self.expression)\n- self.count = count(self.expression)\n-\n def add_operations(self, agg_task, **kwargs):\n- task_sum = self.sum.add_operations(agg_task, **kwargs)\n- task_count = self.count.add_operations(agg_task, **kwargs)\n- self.dtype_in = self.sum.dtype_in\n- self.dtype_out = self.sum.dtype_out\n+ expression = expression_sum = expression = agg_task.df[str(self.expression)]\n+ # ints, floats and bools are upcasted\n+ if expression_sum.dtype.kind in \"buif\":\n+ expression = expression_sum = expression_sum.astype('float64')\n+\n+ sum_agg = sum(expression_sum)\n+ count_agg = count(expression)\n+\n+ task_sum = sum_agg.add_operations(agg_task, **kwargs)\n+ task_count = count_agg.add_operations(agg_task, **kwargs)\n+ self.dtype_in = sum_agg.dtype_in\n+ self.dtype_out = sum_agg.dtype_out\n+\n @vaex.delayed\n def finish(sum, count):\n dtype = sum.dtype\n@@ -101,6 +102,7 @@\n # TODO: not sure why view does not work\n mean = mean.astype(dtype)\n return mean\n+\n return finish(task_sum, task_count)\n \n \n@@ -123,7 +125,6 @@\n self.dtype_out = sum_.dtype_out\n @vaex.delayed\n def finish(sum_moment, sum, count):\n- # print(self.sum, sum, task_sum)\n dtype = sum.dtype\n if sum.dtype.kind == 'M':\n sum = sum.view('uint64')\n@@ -131,7 +132,6 @@\n count = count.view('uint64')\n with np.errstate(divide='ignore', invalid='ignore'):\n mean = sum / count\n- print(sum, sum_moment)\n raw_moments2 = sum_moment/count\n variance = (raw_moments2 - mean**2) #* count/(count-self.ddof)\n if dtype.kind != mean.dtype.kind:\n", "issue": "vaex.groupby type casting\nIn the current implementation of `groupby`, if a column is on of type `int`, calculating the mean will also be of type `int`, which is numerically not accurate. This should probably be cased to `float` somewhere behind the scenes. 
\n", "before_files": [{"content": "import numpy as np\n\nfrom .stat import _Statistic\nimport vaex.superagg\n\naggregates = {}\n\n\ndef register(f, name=None):\n name = name or f.__name__\n aggregates[name] = f\n return f\n\n\nclass AggregatorDescriptor(object):\n def __repr__(self):\n return 'vaex.agg.{}({!r})'.format(self.short_name, str(self.expression))\n\n def finish(self, value):\n return value\n\n\nclass AggregatorDescriptorBasic(AggregatorDescriptor):\n def __init__(self, name, expression, short_name, multi_args=False, agg_args=[]):\n self.name = name\n self.short_name = short_name\n self.expression = expression\n self.agg_args = agg_args\n if not multi_args:\n if self.expression == '*':\n self.expressions = []\n else:\n self.expressions = [self.expression]\n else:\n self.expressions = expression\n\n def pretty_name(self, id=None):\n id = id or \"_\".join(map(str, self.expression))\n return '{0}_{1}'.format(id, self.short_name)\n\n def add_operations(self, agg_task, edges=True, **kwargs):\n value = agg_task.add_aggregation_operation(self, edges=edges, **kwargs)\n @vaex.delayed\n def finish(value):\n return self.finish(value)\n return finish(value)\n\n def _create_operation(self, df, grid):\n if self.expression == '*':\n self.dtype_in = np.dtype('int64')\n self.dtype_out = np.dtype('int64')\n else:\n self.dtype_in = df[str(self.expressions[0])].dtype\n self.dtype_out = self.dtype_in\n if self.short_name == \"count\":\n self.dtype_out = np.dtype('int64')\n agg_op_type = vaex.utils.find_type_from_dtype(vaex.superagg, self.name + \"_\", self.dtype_in)\n agg_op = agg_op_type(grid, *self.agg_args)\n return agg_op\n\n\nclass AggregatorDescriptorMulti(AggregatorDescriptor):\n \"\"\"Uses multiple operations/aggregation to calculate the final aggretation\"\"\"\n def __init__(self, name, expression, short_name):\n self.name = name\n self.short_name = short_name\n self.expression = expression\n self.expressions = [self.expression]\n self._add_sub_agg()\n\n def _add_sub_agg(self):\n pass\n\n def pretty_name(self, id=None):\n id = id or \"_\".join(map(str, self.expression))\n return '{0}_{1}'.format(id, self.short_name)\n\n\nclass AggregatorDescriptorMean(AggregatorDescriptorMulti):\n def __init__(self, name, expression, short_name=\"mean\"):\n super(AggregatorDescriptorMean, self).__init__(name, expression, short_name)\n\n def _add_sub_agg(self):\n self.sum = sum(self.expression)\n self.count = count(self.expression)\n\n def add_operations(self, agg_task, **kwargs):\n task_sum = self.sum.add_operations(agg_task, **kwargs)\n task_count = self.count.add_operations(agg_task, **kwargs)\n self.dtype_in = self.sum.dtype_in\n self.dtype_out = self.sum.dtype_out\n @vaex.delayed\n def finish(sum, count):\n dtype = sum.dtype\n if sum.dtype.kind == 'M':\n sum = sum.view('uint64')\n count = count.view('uint64')\n with np.errstate(divide='ignore', invalid='ignore'):\n mean = sum / count\n if dtype.kind != mean.dtype.kind:\n # TODO: not sure why view does not work\n mean = mean.astype(dtype)\n return mean\n return finish(task_sum, task_count)\n\n\nclass AggregatorDescriptorVar(AggregatorDescriptorMulti):\n def __init__(self, name, expression, short_name=\"var\", ddof=0):\n super(AggregatorDescriptorVar, self).__init__(name, expression, short_name)\n self.ddof = ddof\n\n def add_operations(self, agg_task, **kwargs):\n expression_sum = expression = agg_task.df[str(self.expression)]\n expression = expression_sum = expression.astype('float64')\n sum_moment = _sum_moment(str(expression_sum), 2)\n sum_ = 
sum(str(expression_sum))\n count_ = count(str(expression))\n\n task_sum_moment = sum_moment.add_operations(agg_task, **kwargs)\n task_sum = sum_.add_operations(agg_task, **kwargs)\n task_count = count_.add_operations(agg_task, **kwargs)\n self.dtype_in = sum_.dtype_in\n self.dtype_out = sum_.dtype_out\n @vaex.delayed\n def finish(sum_moment, sum, count):\n # print(self.sum, sum, task_sum)\n dtype = sum.dtype\n if sum.dtype.kind == 'M':\n sum = sum.view('uint64')\n sum_moment = sum_moment.view('uint64')\n count = count.view('uint64')\n with np.errstate(divide='ignore', invalid='ignore'):\n mean = sum / count\n print(sum, sum_moment)\n raw_moments2 = sum_moment/count\n variance = (raw_moments2 - mean**2) #* count/(count-self.ddof)\n if dtype.kind != mean.dtype.kind:\n # TODO: not sure why view does not work\n variance = variance.astype(dtype)\n return self.finish(variance)\n return finish(task_sum_moment, task_sum, task_count)\n\n\nclass AggregatorDescriptorStd(AggregatorDescriptorVar):\n def finish(self, value):\n return value**0.5\n\n@register\ndef count(expression='*'):\n '''Creates a count aggregation'''\n return AggregatorDescriptorBasic('AggCount', expression, 'count')\n\n@register\ndef sum(expression):\n '''Creates a sum aggregation'''\n return AggregatorDescriptorBasic('AggSum', expression, 'sum')\n\n@register\ndef mean(expression):\n '''Creates a mean aggregation'''\n return AggregatorDescriptorMean('mean', expression, 'mean')\n\n@register\ndef min(expression):\n '''Creates a min aggregation'''\n return AggregatorDescriptorBasic('AggMin', expression, 'min')\n\n@register\ndef _sum_moment(expression, moment):\n '''Creates a sum of moment aggregator'''\n return AggregatorDescriptorBasic('AggSumMoment', expression, 'summoment', agg_args=[moment])\n\n@register\ndef max(expression):\n '''Creates a max aggregation'''\n return AggregatorDescriptorBasic('AggMax', expression, 'max')\n\n@register\ndef first(expression, order_expression):\n '''Creates a max aggregation'''\n return AggregatorDescriptorBasic('AggFirst', [expression, order_expression], 'first', multi_args=True)\n\n@register\ndef std(expression, ddof=0):\n '''Creates a standard deviation aggregation'''\n return AggregatorDescriptorStd('std', expression, 'std', ddof=ddof)\n\n@register\ndef var(expression, ddof=0):\n '''Creates a variance aggregation'''\n return AggregatorDescriptorVar('var', expression, 'var', ddof=ddof)\n\n# @register\n# def covar(x, y):\n# '''Creates a standard deviation aggregation'''\n# return _Statistic('covar', x, y)\n\n# @register\n# def correlation(x, y):\n# '''Creates a standard deviation aggregation'''\n# return _Statistic('correlation', x, y)\n\n", "path": "packages/vaex-core/vaex/agg.py"}]} | 2,734 | 742 |
gh_patches_debug_25109 | rasdani/github-patches | git_diff | jazzband__pip-tools-737 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pip-sync uses first pip script on PATH instead of the one from the Python version it is run with
##### Environment Versions
1. OS Type: Android 8.1 aarch64/Latest Termux
1. Python version: 3.7.2
1. pip version: `pip 18.1 from /data/data/com.termux/files/usr/lib/python2.7/site-packages/pip (python 2.7)` (pip3.7 --version is `pip 19.0.2 from /data/data/com.termux/files/usr/lib/python3.7/site-packages/pip (python 3.7)`)
1. pip-tools version: `pip-compile, version 3.3.2`
##### Steps to replicate
1. Get Termux
2. Run `pkg up && pkg install python python2`
3. Install pip on Python 3 (get-pip.py)
4. Install pip on Python 2 (get-pip.py)
5. Install piptools on Python 3
6. Use `pip-sync` with a requirements.txt file and watch it installing to Python 2
##### Expected result
Installs into Python 3 if run on Python 3 and into Python 2 if run on Python 2
##### Actual result
Installs to whichever Python version the first `pip` script on `PATH` belongs to
##### Extra information
If using Python 2 and 3 together, it would be very useful to have an extra `pip-compile` and `pip-sync` script per Python major and minor version, to be able to select which Python version piptools runs on.
</issue>
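An illustrative sketch of why resolving `pip` by name can target a different interpreter than the one running `pip-sync` (editorial illustration, not code from the report; the path mentioned in the comment is hypothetical):

```python
import shutil
import sys

# "pip" resolved via PATH may belong to any Python installation,
# e.g. a Python 2.7 script under /data/data/com.termux/files/usr/bin
print(shutil.which("pip"))

# the interpreter actually executing this process is unambiguous
print(sys.executable)

# running [sys.executable, "-m", "pip", ...] ties the install to that
# interpreter instead of whatever "pip" happens to be first on PATH
```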
<code>
[start of piptools/sync.py]
1 import collections
2 import os
3 import sys
4 import tempfile
5 from subprocess import check_call
6
7 from piptools._compat import stdlib_pkgs, DEV_PKGS
8 from . import click
9 from .exceptions import IncompatibleRequirements, UnsupportedConstraint
10 from .utils import flat_map, format_requirement, key_from_ireq, key_from_req, get_hashes_from_ireq
11
12 PACKAGES_TO_IGNORE = [
13 '-markerlib',
14 'pip',
15 'pip-tools',
16 'pip-review',
17 'pkg-resources',
18 ] + list(stdlib_pkgs) + list(DEV_PKGS)
19
20
21 def dependency_tree(installed_keys, root_key):
22 """
23 Calculate the dependency tree for the package `root_key` and return
24 a collection of all its dependencies. Uses a DFS traversal algorithm.
25
26 `installed_keys` should be a {key: requirement} mapping, e.g.
27 {'django': from_line('django==1.8')}
28 `root_key` should be the key to return the dependency tree for.
29 """
30 dependencies = set()
31 queue = collections.deque()
32
33 if root_key in installed_keys:
34 dep = installed_keys[root_key]
35 queue.append(dep)
36
37 while queue:
38 v = queue.popleft()
39 key = key_from_req(v)
40 if key in dependencies:
41 continue
42
43 dependencies.add(key)
44
45 for dep_specifier in v.requires():
46 dep_name = key_from_req(dep_specifier)
47 if dep_name in installed_keys:
48 dep = installed_keys[dep_name]
49
50 if dep_specifier.specifier.contains(dep.version):
51 queue.append(dep)
52
53 return dependencies
54
55
56 def get_dists_to_ignore(installed):
57 """
58 Returns a collection of package names to ignore when performing pip-sync,
59 based on the currently installed environment. For example, when pip-tools
60 is installed in the local environment, it should be ignored, including all
61 of its dependencies (e.g. click). When pip-tools is not installed
62 locally, click should also be installed/uninstalled depending on the given
63 requirements.
64 """
65 installed_keys = {key_from_req(r): r for r in installed}
66 return list(flat_map(lambda req: dependency_tree(installed_keys, req), PACKAGES_TO_IGNORE))
67
68
69 def merge(requirements, ignore_conflicts):
70 by_key = {}
71
72 for ireq in requirements:
73 if ireq.link is not None and not ireq.editable:
74 msg = ('pip-compile does not support URLs as packages, unless they are editable. '
75 'Perhaps add -e option?')
76 raise UnsupportedConstraint(msg, ireq)
77
78 key = ireq.link or key_from_req(ireq.req)
79
80 if not ignore_conflicts:
81 existing_ireq = by_key.get(key)
82 if existing_ireq:
83 # NOTE: We check equality here since we can assume that the
84 # requirements are all pinned
85 if ireq.specifier != existing_ireq.specifier:
86 raise IncompatibleRequirements(ireq, existing_ireq)
87
88 # TODO: Always pick the largest specifier in case of a conflict
89 by_key[key] = ireq
90
91 return by_key.values()
92
93
94 def diff(compiled_requirements, installed_dists):
95 """
96 Calculate which packages should be installed or uninstalled, given a set
97 of compiled requirements and a list of currently installed modules.
98 """
99 requirements_lut = {r.link or key_from_req(r.req): r for r in compiled_requirements}
100
101 satisfied = set() # holds keys
102 to_install = set() # holds InstallRequirement objects
103 to_uninstall = set() # holds keys
104
105 pkgs_to_ignore = get_dists_to_ignore(installed_dists)
106 for dist in installed_dists:
107 key = key_from_req(dist)
108 if key not in requirements_lut or not requirements_lut[key].match_markers():
109 to_uninstall.add(key)
110 elif requirements_lut[key].specifier.contains(dist.version):
111 satisfied.add(key)
112
113 for key, requirement in requirements_lut.items():
114 if key not in satisfied and requirement.match_markers():
115 to_install.add(requirement)
116
117 # Make sure to not uninstall any packages that should be ignored
118 to_uninstall -= set(pkgs_to_ignore)
119
120 return (to_install, to_uninstall)
121
122
123 def sync(to_install, to_uninstall, verbose=False, dry_run=False, pip_flags=None, install_flags=None):
124 """
125 Install and uninstalls the given sets of modules.
126 """
127 if not to_uninstall and not to_install:
128 click.echo("Everything up-to-date")
129
130 if pip_flags is None:
131 pip_flags = []
132
133 if not verbose:
134 pip_flags += ['-q']
135
136 if os.environ.get('VIRTUAL_ENV'):
137 # find pip via PATH
138 pip = 'pip'
139 else:
140 # find pip in same directory as pip-sync entry-point script
141 pip = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), 'pip')
142
143 if to_uninstall:
144 if dry_run:
145 click.echo("Would uninstall:")
146 for pkg in to_uninstall:
147 click.echo(" {}".format(pkg))
148 else:
149 check_call([pip, 'uninstall', '-y'] + pip_flags + sorted(to_uninstall))
150
151 if to_install:
152 if install_flags is None:
153 install_flags = []
154 if dry_run:
155 click.echo("Would install:")
156 for ireq in to_install:
157 click.echo(" {}".format(format_requirement(ireq)))
158 else:
159 # prepare requirement lines
160 req_lines = []
161 for ireq in sorted(to_install, key=key_from_ireq):
162 ireq_hashes = get_hashes_from_ireq(ireq)
163 req_lines.append(format_requirement(ireq, hashes=ireq_hashes))
164
165 # save requirement lines to a temporary file
166 tmp_req_file = tempfile.NamedTemporaryFile(mode='wt', delete=False)
167 tmp_req_file.write('\n'.join(req_lines))
168 tmp_req_file.close()
169
170 try:
171 check_call([pip, 'install', '-r', tmp_req_file.name] + pip_flags + install_flags)
172 finally:
173 os.unlink(tmp_req_file.name)
174
175 return 0
176
[end of piptools/sync.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/piptools/sync.py b/piptools/sync.py
--- a/piptools/sync.py
+++ b/piptools/sync.py
@@ -133,20 +133,13 @@
if not verbose:
pip_flags += ['-q']
- if os.environ.get('VIRTUAL_ENV'):
- # find pip via PATH
- pip = 'pip'
- else:
- # find pip in same directory as pip-sync entry-point script
- pip = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), 'pip')
-
if to_uninstall:
if dry_run:
click.echo("Would uninstall:")
for pkg in to_uninstall:
click.echo(" {}".format(pkg))
else:
- check_call([pip, 'uninstall', '-y'] + pip_flags + sorted(to_uninstall))
+ check_call([sys.executable, '-m', 'pip', 'uninstall', '-y'] + pip_flags + sorted(to_uninstall))
if to_install:
if install_flags is None:
@@ -168,7 +161,9 @@
tmp_req_file.close()
try:
- check_call([pip, 'install', '-r', tmp_req_file.name] + pip_flags + install_flags)
+ check_call(
+ [sys.executable, '-m', 'pip', 'install', '-r', tmp_req_file.name] + pip_flags + install_flags
+ )
finally:
os.unlink(tmp_req_file.name)
| {"golden_diff": "diff --git a/piptools/sync.py b/piptools/sync.py\n--- a/piptools/sync.py\n+++ b/piptools/sync.py\n@@ -133,20 +133,13 @@\n if not verbose:\n pip_flags += ['-q']\n \n- if os.environ.get('VIRTUAL_ENV'):\n- # find pip via PATH\n- pip = 'pip'\n- else:\n- # find pip in same directory as pip-sync entry-point script\n- pip = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), 'pip')\n-\n if to_uninstall:\n if dry_run:\n click.echo(\"Would uninstall:\")\n for pkg in to_uninstall:\n click.echo(\" {}\".format(pkg))\n else:\n- check_call([pip, 'uninstall', '-y'] + pip_flags + sorted(to_uninstall))\n+ check_call([sys.executable, '-m', 'pip', 'uninstall', '-y'] + pip_flags + sorted(to_uninstall))\n \n if to_install:\n if install_flags is None:\n@@ -168,7 +161,9 @@\n tmp_req_file.close()\n \n try:\n- check_call([pip, 'install', '-r', tmp_req_file.name] + pip_flags + install_flags)\n+ check_call(\n+ [sys.executable, '-m', 'pip', 'install', '-r', tmp_req_file.name] + pip_flags + install_flags\n+ )\n finally:\n os.unlink(tmp_req_file.name)\n", "issue": "pip-sync uses first pip script on PATH instead of the one from the Python version it is run with\nDescribe the issue briefly here.\r\n\r\n##### Environment Versions\r\n\r\n1. OS Type: Android 8.1 aarch64/Latest Termux\r\n1. Python version: 3.7.2\r\n1. pip version: `pip 18.1 from /data/data/com.termux/files/usr/lib/python2.7/site-packages/pip (python 2.7)` (pip3.7 --version is `pip 19.0.2 from /data/data/com.termux/files/usr/lib/python3.7/site-packages/pip (python 3.7)\r\n1. pip-tools version: `pip-compile, version 3.3.2`\r\n\r\n##### Steps to replicate\r\n\r\n1. Get Termux\r\n2. Run `pkg up && pkg install python python2`\r\n3. Install pip on Python 3 (get-pip.py)\r\n4. Install pip on Python 2 (get-pip.py)\r\n5. Install piptools on Python 3\r\n6. Use `pip-sync` with a requirements.txt file and watch it installing to Python 2\r\n\r\n\r\n##### Expected result\r\n\r\nInstalls into Python 3 if ran on Python 3 and to Python 2 if ran on Python2\r\n\r\n##### Actual result\r\n\r\nInstalls to whichever Python version `pip` script on `PATH` is from\r\n\r\n##### Extra information\r\n\r\nIf using Python 2 and 3 together, an extra `pip-compile` and `pip-sync` script per Python major and per Python minor version to be able to select Python version to run piptools on would be very useful.\r\n\n", "before_files": [{"content": "import collections\nimport os\nimport sys\nimport tempfile\nfrom subprocess import check_call\n\nfrom piptools._compat import stdlib_pkgs, DEV_PKGS\nfrom . import click\nfrom .exceptions import IncompatibleRequirements, UnsupportedConstraint\nfrom .utils import flat_map, format_requirement, key_from_ireq, key_from_req, get_hashes_from_ireq\n\nPACKAGES_TO_IGNORE = [\n '-markerlib',\n 'pip',\n 'pip-tools',\n 'pip-review',\n 'pkg-resources',\n] + list(stdlib_pkgs) + list(DEV_PKGS)\n\n\ndef dependency_tree(installed_keys, root_key):\n \"\"\"\n Calculate the dependency tree for the package `root_key` and return\n a collection of all its dependencies. 
Uses a DFS traversal algorithm.\n\n `installed_keys` should be a {key: requirement} mapping, e.g.\n {'django': from_line('django==1.8')}\n `root_key` should be the key to return the dependency tree for.\n \"\"\"\n dependencies = set()\n queue = collections.deque()\n\n if root_key in installed_keys:\n dep = installed_keys[root_key]\n queue.append(dep)\n\n while queue:\n v = queue.popleft()\n key = key_from_req(v)\n if key in dependencies:\n continue\n\n dependencies.add(key)\n\n for dep_specifier in v.requires():\n dep_name = key_from_req(dep_specifier)\n if dep_name in installed_keys:\n dep = installed_keys[dep_name]\n\n if dep_specifier.specifier.contains(dep.version):\n queue.append(dep)\n\n return dependencies\n\n\ndef get_dists_to_ignore(installed):\n \"\"\"\n Returns a collection of package names to ignore when performing pip-sync,\n based on the currently installed environment. For example, when pip-tools\n is installed in the local environment, it should be ignored, including all\n of its dependencies (e.g. click). When pip-tools is not installed\n locally, click should also be installed/uninstalled depending on the given\n requirements.\n \"\"\"\n installed_keys = {key_from_req(r): r for r in installed}\n return list(flat_map(lambda req: dependency_tree(installed_keys, req), PACKAGES_TO_IGNORE))\n\n\ndef merge(requirements, ignore_conflicts):\n by_key = {}\n\n for ireq in requirements:\n if ireq.link is not None and not ireq.editable:\n msg = ('pip-compile does not support URLs as packages, unless they are editable. '\n 'Perhaps add -e option?')\n raise UnsupportedConstraint(msg, ireq)\n\n key = ireq.link or key_from_req(ireq.req)\n\n if not ignore_conflicts:\n existing_ireq = by_key.get(key)\n if existing_ireq:\n # NOTE: We check equality here since we can assume that the\n # requirements are all pinned\n if ireq.specifier != existing_ireq.specifier:\n raise IncompatibleRequirements(ireq, existing_ireq)\n\n # TODO: Always pick the largest specifier in case of a conflict\n by_key[key] = ireq\n\n return by_key.values()\n\n\ndef diff(compiled_requirements, installed_dists):\n \"\"\"\n Calculate which packages should be installed or uninstalled, given a set\n of compiled requirements and a list of currently installed modules.\n \"\"\"\n requirements_lut = {r.link or key_from_req(r.req): r for r in compiled_requirements}\n\n satisfied = set() # holds keys\n to_install = set() # holds InstallRequirement objects\n to_uninstall = set() # holds keys\n\n pkgs_to_ignore = get_dists_to_ignore(installed_dists)\n for dist in installed_dists:\n key = key_from_req(dist)\n if key not in requirements_lut or not requirements_lut[key].match_markers():\n to_uninstall.add(key)\n elif requirements_lut[key].specifier.contains(dist.version):\n satisfied.add(key)\n\n for key, requirement in requirements_lut.items():\n if key not in satisfied and requirement.match_markers():\n to_install.add(requirement)\n\n # Make sure to not uninstall any packages that should be ignored\n to_uninstall -= set(pkgs_to_ignore)\n\n return (to_install, to_uninstall)\n\n\ndef sync(to_install, to_uninstall, verbose=False, dry_run=False, pip_flags=None, install_flags=None):\n \"\"\"\n Install and uninstalls the given sets of modules.\n \"\"\"\n if not to_uninstall and not to_install:\n click.echo(\"Everything up-to-date\")\n\n if pip_flags is None:\n pip_flags = []\n\n if not verbose:\n pip_flags += ['-q']\n\n if os.environ.get('VIRTUAL_ENV'):\n # find pip via PATH\n pip = 'pip'\n else:\n # find pip in same directory as pip-sync 
entry-point script\n pip = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), 'pip')\n\n if to_uninstall:\n if dry_run:\n click.echo(\"Would uninstall:\")\n for pkg in to_uninstall:\n click.echo(\" {}\".format(pkg))\n else:\n check_call([pip, 'uninstall', '-y'] + pip_flags + sorted(to_uninstall))\n\n if to_install:\n if install_flags is None:\n install_flags = []\n if dry_run:\n click.echo(\"Would install:\")\n for ireq in to_install:\n click.echo(\" {}\".format(format_requirement(ireq)))\n else:\n # prepare requirement lines\n req_lines = []\n for ireq in sorted(to_install, key=key_from_ireq):\n ireq_hashes = get_hashes_from_ireq(ireq)\n req_lines.append(format_requirement(ireq, hashes=ireq_hashes))\n\n # save requirement lines to a temporary file\n tmp_req_file = tempfile.NamedTemporaryFile(mode='wt', delete=False)\n tmp_req_file.write('\\n'.join(req_lines))\n tmp_req_file.close()\n\n try:\n check_call([pip, 'install', '-r', tmp_req_file.name] + pip_flags + install_flags)\n finally:\n os.unlink(tmp_req_file.name)\n\n return 0\n", "path": "piptools/sync.py"}]} | 2,646 | 335 |
gh_patches_debug_24939 | rasdani/github-patches | git_diff | unionai-oss__pandera-958 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug: mypy complains about datetime.date / pandera.Date type
Hi!
I tried the following to represent a column of `datetime.date` objects (with no time):
```python
expiration_date: pt.Series[datetime.date] = ...
```
and
```python
expiration_date: pt.Series[pandas_engine.Date] = ...
```
Either raises an error "Value of type variable "GenericDtype" of "Series" cannot be "...".
I looked in the tests, but it's still unclear to me how to do this.
Using a DataframeSchema I was able to set `dtype=datetime.date` and it appears to work, but what about with SchemaModel?
What's the right declaration?
Thank you,
</issue>
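As background (a simplified, hypothetical stand-in — these are not pandera's actual definitions), the mypy error is the normal behaviour of a value-constrained `TypeVar`: only the exact types listed as constraints are accepted as parameters, so any dtype missing from `GenericDtype`'s constraint list is rejected:

```python
import datetime
from typing import Generic, TypeVar

# stand-in for pandera's constrained GenericDtype, with a shortened constraint list
GenericDtype = TypeVar("GenericDtype", bool, int, float, str)

class Series(Generic[GenericDtype]):  # stand-in for pandera.typing.Series
    ...

def example() -> None:
    ok: Series[int]              # accepted: int is one of the constraints
    bad: Series[datetime.date]   # mypy: value of type variable "GenericDtype" cannot be "date"
```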
<code>
[start of pandera/typing/common.py]
1 """Common typing functionality."""
2 # pylint:disable=abstract-method,disable=too-many-ancestors
3
4 import inspect
5 from typing import TYPE_CHECKING, Any, Generic, Optional, Type, TypeVar
6
7 import pandas as pd
8 import typing_inspect
9
10 from .. import dtypes
11 from ..engines import numpy_engine, pandas_engine
12
13 Bool = dtypes.Bool #: ``"bool"`` numpy dtype
14 DateTime = dtypes.DateTime #: ``"datetime64[ns]"`` numpy dtype
15 Timedelta = dtypes.Timedelta #: ``"timedelta64[ns]"`` numpy dtype
16 Category = dtypes.Category #: pandas ``"categorical"`` datatype
17 Float = dtypes.Float #: ``"float"`` numpy dtype
18 Float16 = dtypes.Float16 #: ``"float16"`` numpy dtype
19 Float32 = dtypes.Float32 #: ``"float32"`` numpy dtype
20 Float64 = dtypes.Float64 #: ``"float64"`` numpy dtype
21 Int = dtypes.Int #: ``"int"`` numpy dtype
22 Int8 = dtypes.Int8 #: ``"int8"`` numpy dtype
23 Int16 = dtypes.Int16 #: ``"int16"`` numpy dtype
24 Int32 = dtypes.Int32 #: ``"int32"`` numpy dtype
25 Int64 = dtypes.Int64 #: ``"int64"`` numpy dtype
26 UInt8 = dtypes.UInt8 #: ``"uint8"`` numpy dtype
27 UInt16 = dtypes.UInt16 #: ``"uint16"`` numpy dtype
28 UInt32 = dtypes.UInt32 #: ``"uint32"`` numpy dtype
29 UInt64 = dtypes.UInt64 #: ``"uint64"`` numpy dtype
30 INT8 = pandas_engine.INT8 #: ``"Int8"`` pandas dtype:: pandas 0.24.0+
31 INT16 = pandas_engine.INT16 #: ``"Int16"`` pandas dtype: pandas 0.24.0+
32 INT32 = pandas_engine.INT32 #: ``"Int32"`` pandas dtype: pandas 0.24.0+
33 INT64 = pandas_engine.INT64 #: ``"Int64"`` pandas dtype: pandas 0.24.0+
34 UINT8 = pandas_engine.UINT8 #: ``"UInt8"`` pandas dtype:: pandas 0.24.0+
35 UINT16 = pandas_engine.UINT16 #: ``"UInt16"`` pandas dtype: pandas 0.24.0+
36 UINT32 = pandas_engine.UINT32 #: ``"UInt32"`` pandas dtype: pandas 0.24.0+
37 UINT64 = pandas_engine.UINT64 #: ``"UInt64"`` pandas dtype: pandas 0.24.0+
38 Object = numpy_engine.Object #: ``"object"`` numpy dtype
39 String = dtypes.String #: ``"str"`` numpy dtype
40 #: ``"string"`` pandas dtypes: pandas 1.0.0+. For <1.0.0, this enum will
41 #: fall back on the str-as-object-array representation.
42 STRING = pandas_engine.STRING #: ``"str"`` numpy dtype
43 BOOL = pandas_engine.BOOL #: ``"str"`` numpy dtype
44
45 try:
46 Geometry = pandas_engine.Geometry # : ``"geometry"`` geopandas dtype
47 GEOPANDAS_INSTALLED = True
48 except AttributeError:
49 GEOPANDAS_INSTALLED = False
50
51 if GEOPANDAS_INSTALLED:
52 GenericDtype = TypeVar( # type: ignore
53 "GenericDtype",
54 bool,
55 int,
56 str,
57 float,
58 pd.core.dtypes.base.ExtensionDtype,
59 Bool,
60 DateTime,
61 Timedelta,
62 Category,
63 Float,
64 Float16,
65 Float32,
66 Float64,
67 Int,
68 Int8,
69 Int16,
70 Int32,
71 Int64,
72 UInt8,
73 UInt16,
74 UInt32,
75 UInt64,
76 INT8,
77 INT16,
78 INT32,
79 INT64,
80 UINT8,
81 UINT16,
82 UINT32,
83 UINT64,
84 Object,
85 String,
86 STRING,
87 Geometry,
88 covariant=True,
89 )
90 else:
91 GenericDtype = TypeVar( # type: ignore
92 "GenericDtype",
93 bool,
94 int,
95 str,
96 float,
97 pd.core.dtypes.base.ExtensionDtype,
98 Bool,
99 DateTime,
100 Timedelta,
101 Category,
102 Float,
103 Float16,
104 Float32,
105 Float64,
106 Int,
107 Int8,
108 Int16,
109 Int32,
110 Int64,
111 UInt8,
112 UInt16,
113 UInt32,
114 UInt64,
115 INT8,
116 INT16,
117 INT32,
118 INT64,
119 UINT8,
120 UINT16,
121 UINT32,
122 UINT64,
123 Object,
124 String,
125 STRING,
126 covariant=True,
127 )
128
129 Schema = TypeVar("Schema", bound="SchemaModel") # type: ignore
130
131
132 # pylint:disable=invalid-name
133 if TYPE_CHECKING:
134 T = TypeVar("T") # pragma: no cover
135 else:
136 T = Schema
137
138
139 class DataFrameBase(Generic[T]):
140 # pylint: disable=too-few-public-methods
141 """
142 Pandera Dataframe base class for validating dataframes on
143 initialization.
144 """
145
146 default_dtype: Optional[Type] = None
147
148 def __setattr__(self, name: str, value: Any) -> None:
149 # pylint: disable=no-member
150 object.__setattr__(self, name, value)
151 if name == "__orig_class__":
152 orig_class = getattr(self, "__orig_class__")
153 class_args = getattr(orig_class, "__args__", None)
154 if class_args is not None and any(
155 x.__name__ == "SchemaModel"
156 for x in inspect.getmro(class_args[0])
157 ):
158 schema_model = value.__args__[0]
159
160 # prevent the double validation problem by preventing checks for
161 # dataframes with a defined pandera.schema
162 pandera_accessor = getattr(self, "pandera")
163 if (
164 pandera_accessor.schema is None
165 or pandera_accessor.schema != schema_model.to_schema()
166 ):
167 pandera_accessor.add_schema(schema_model.to_schema())
168 self.__dict__ = schema_model.validate(self).__dict__
169
170
171 # pylint:disable=too-few-public-methods
172 class SeriesBase(Generic[GenericDtype]):
173 """Pandera Series base class to use for all pandas-like APIs."""
174
175 default_dtype: Optional[Type] = None
176
177 def __get__(
178 self, instance: object, owner: Type
179 ) -> str: # pragma: no cover
180 raise AttributeError("Series should resolve to Field-s")
181
182
183 # pylint:disable=too-few-public-methods
184 class IndexBase(Generic[GenericDtype]):
185 """Representation of pandas.Index, only used for type annotation.
186
187 *new in 0.5.0*
188 """
189
190 default_dtype: Optional[Type] = None
191
192 def __get__(
193 self, instance: object, owner: Type
194 ) -> str: # pragma: no cover
195 raise AttributeError("Indexes should resolve to pa.Index-s")
196
197
198 class AnnotationInfo: # pylint:disable=too-few-public-methods
199 """Captures extra information about an annotation.
200
201 Attributes:
202 origin: The non-parameterized generic class.
203 arg: The first generic type (SchemaModel does not support more than
204 1 argument).
205 literal: Whether the annotation is a literal.
206 optional: Whether the annotation is optional.
207 raw_annotation: The raw annotation.
208 metadata: Extra arguments passed to :data:`typing.Annotated`.
209 """
210
211 def __init__(self, raw_annotation: Type) -> None:
212 self._parse_annotation(raw_annotation)
213
214 @property
215 def is_generic_df(self) -> bool:
216 """True if the annotation is a DataFrameBase subclass."""
217 try:
218 if self.origin is None:
219 return False
220 return issubclass(self.origin, DataFrameBase)
221 except TypeError:
222 return False
223
224 def _parse_annotation(self, raw_annotation: Type) -> None:
225 """Parse key information from annotation.
226
227 :param annotation: A subscripted type.
228 :returns: Annotation
229 """
230 self.raw_annotation = raw_annotation
231 self.origin = self.arg = None
232
233 self.optional = typing_inspect.is_optional_type(raw_annotation)
234 if self.optional and typing_inspect.is_union_type(raw_annotation):
235 # Annotated with Optional or Union[..., NoneType]
236 # get_args -> (pandera.typing.Index[str], <class 'NoneType'>)
237 raw_annotation = typing_inspect.get_args(raw_annotation)[0]
238
239 self.origin = typing_inspect.get_origin(raw_annotation)
240 # Replace empty tuple returned from get_args by None
241 args = typing_inspect.get_args(raw_annotation) or None
242 self.arg = args[0] if args else args
243
244 self.metadata = getattr(self.arg, "__metadata__", None)
245 if self.metadata:
246 self.arg = typing_inspect.get_args(self.arg)[0]
247
248 self.literal = typing_inspect.is_literal_type(self.arg)
249 if self.literal:
250 self.arg = typing_inspect.get_args(self.arg)[0]
251
252 self.default_dtype = getattr(raw_annotation, "default_dtype", None)
253
[end of pandera/typing/common.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pandera/typing/common.py b/pandera/typing/common.py
--- a/pandera/typing/common.py
+++ b/pandera/typing/common.py
@@ -11,7 +11,9 @@
from ..engines import numpy_engine, pandas_engine
Bool = dtypes.Bool #: ``"bool"`` numpy dtype
+Date = dtypes.Date #: ``datetime.date`` object dtype
DateTime = dtypes.DateTime #: ``"datetime64[ns]"`` numpy dtype
+Decimal = dtypes.Decimal #: ``decimal.Decimal`` object dtype
Timedelta = dtypes.Timedelta #: ``"timedelta64[ns]"`` numpy dtype
Category = dtypes.Category #: pandas ``"categorical"`` datatype
Float = dtypes.Float #: ``"float"`` numpy dtype
@@ -57,7 +59,9 @@
float,
pd.core.dtypes.base.ExtensionDtype,
Bool,
+ Date,
DateTime,
+ Decimal,
Timedelta,
Category,
Float,
@@ -96,7 +100,9 @@
float,
pd.core.dtypes.base.ExtensionDtype,
Bool,
+ Date,
DateTime,
+ Decimal,
Timedelta,
Category,
Float,
| {"golden_diff": "diff --git a/pandera/typing/common.py b/pandera/typing/common.py\n--- a/pandera/typing/common.py\n+++ b/pandera/typing/common.py\n@@ -11,7 +11,9 @@\n from ..engines import numpy_engine, pandas_engine\n \n Bool = dtypes.Bool #: ``\"bool\"`` numpy dtype\n+Date = dtypes.Date #: ``datetime.date`` object dtype\n DateTime = dtypes.DateTime #: ``\"datetime64[ns]\"`` numpy dtype\n+Decimal = dtypes.Decimal #: ``decimal.Decimal`` object dtype\n Timedelta = dtypes.Timedelta #: ``\"timedelta64[ns]\"`` numpy dtype\n Category = dtypes.Category #: pandas ``\"categorical\"`` datatype\n Float = dtypes.Float #: ``\"float\"`` numpy dtype\n@@ -57,7 +59,9 @@\n float,\n pd.core.dtypes.base.ExtensionDtype,\n Bool,\n+ Date,\n DateTime,\n+ Decimal,\n Timedelta,\n Category,\n Float,\n@@ -96,7 +100,9 @@\n float,\n pd.core.dtypes.base.ExtensionDtype,\n Bool,\n+ Date,\n DateTime,\n+ Decimal,\n Timedelta,\n Category,\n Float,\n", "issue": "Bug: mypy complains about datetime.date / pandera.Date type\nHi! \r\nI tried the following to represent a column of `datetime.date` objects (with no time):\r\n```python\r\n expiration_date: pt.Series[datetime.date] = ...\r\n```\r\nand\r\n```python\r\n expiration_date: pt.Series[pandas_engine.Date] = ...\r\n```\r\n\r\nEither raises an error \"Value of type variable \"GenericDtype\" of \"Series\" cannot be \"...\".\r\nI looked in the tests, it's still unclear to me how to do this.\r\n\r\nUsing a DataframeSchema I was able to set `dtype=datetime.date` and it appears to work, but what about with SchemaModel?\r\nWhat's the right declaration?\r\n\r\nThank you,\r\n\r\n\n", "before_files": [{"content": "\"\"\"Common typing functionality.\"\"\"\n# pylint:disable=abstract-method,disable=too-many-ancestors\n\nimport inspect\nfrom typing import TYPE_CHECKING, Any, Generic, Optional, Type, TypeVar\n\nimport pandas as pd\nimport typing_inspect\n\nfrom .. 
import dtypes\nfrom ..engines import numpy_engine, pandas_engine\n\nBool = dtypes.Bool #: ``\"bool\"`` numpy dtype\nDateTime = dtypes.DateTime #: ``\"datetime64[ns]\"`` numpy dtype\nTimedelta = dtypes.Timedelta #: ``\"timedelta64[ns]\"`` numpy dtype\nCategory = dtypes.Category #: pandas ``\"categorical\"`` datatype\nFloat = dtypes.Float #: ``\"float\"`` numpy dtype\nFloat16 = dtypes.Float16 #: ``\"float16\"`` numpy dtype\nFloat32 = dtypes.Float32 #: ``\"float32\"`` numpy dtype\nFloat64 = dtypes.Float64 #: ``\"float64\"`` numpy dtype\nInt = dtypes.Int #: ``\"int\"`` numpy dtype\nInt8 = dtypes.Int8 #: ``\"int8\"`` numpy dtype\nInt16 = dtypes.Int16 #: ``\"int16\"`` numpy dtype\nInt32 = dtypes.Int32 #: ``\"int32\"`` numpy dtype\nInt64 = dtypes.Int64 #: ``\"int64\"`` numpy dtype\nUInt8 = dtypes.UInt8 #: ``\"uint8\"`` numpy dtype\nUInt16 = dtypes.UInt16 #: ``\"uint16\"`` numpy dtype\nUInt32 = dtypes.UInt32 #: ``\"uint32\"`` numpy dtype\nUInt64 = dtypes.UInt64 #: ``\"uint64\"`` numpy dtype\nINT8 = pandas_engine.INT8 #: ``\"Int8\"`` pandas dtype:: pandas 0.24.0+\nINT16 = pandas_engine.INT16 #: ``\"Int16\"`` pandas dtype: pandas 0.24.0+\nINT32 = pandas_engine.INT32 #: ``\"Int32\"`` pandas dtype: pandas 0.24.0+\nINT64 = pandas_engine.INT64 #: ``\"Int64\"`` pandas dtype: pandas 0.24.0+\nUINT8 = pandas_engine.UINT8 #: ``\"UInt8\"`` pandas dtype:: pandas 0.24.0+\nUINT16 = pandas_engine.UINT16 #: ``\"UInt16\"`` pandas dtype: pandas 0.24.0+\nUINT32 = pandas_engine.UINT32 #: ``\"UInt32\"`` pandas dtype: pandas 0.24.0+\nUINT64 = pandas_engine.UINT64 #: ``\"UInt64\"`` pandas dtype: pandas 0.24.0+\nObject = numpy_engine.Object #: ``\"object\"`` numpy dtype\nString = dtypes.String #: ``\"str\"`` numpy dtype\n#: ``\"string\"`` pandas dtypes: pandas 1.0.0+. For <1.0.0, this enum will\n#: fall back on the str-as-object-array representation.\nSTRING = pandas_engine.STRING #: ``\"str\"`` numpy dtype\nBOOL = pandas_engine.BOOL #: ``\"str\"`` numpy dtype\n\ntry:\n Geometry = pandas_engine.Geometry # : ``\"geometry\"`` geopandas dtype\n GEOPANDAS_INSTALLED = True\nexcept AttributeError:\n GEOPANDAS_INSTALLED = False\n\nif GEOPANDAS_INSTALLED:\n GenericDtype = TypeVar( # type: ignore\n \"GenericDtype\",\n bool,\n int,\n str,\n float,\n pd.core.dtypes.base.ExtensionDtype,\n Bool,\n DateTime,\n Timedelta,\n Category,\n Float,\n Float16,\n Float32,\n Float64,\n Int,\n Int8,\n Int16,\n Int32,\n Int64,\n UInt8,\n UInt16,\n UInt32,\n UInt64,\n INT8,\n INT16,\n INT32,\n INT64,\n UINT8,\n UINT16,\n UINT32,\n UINT64,\n Object,\n String,\n STRING,\n Geometry,\n covariant=True,\n )\nelse:\n GenericDtype = TypeVar( # type: ignore\n \"GenericDtype\",\n bool,\n int,\n str,\n float,\n pd.core.dtypes.base.ExtensionDtype,\n Bool,\n DateTime,\n Timedelta,\n Category,\n Float,\n Float16,\n Float32,\n Float64,\n Int,\n Int8,\n Int16,\n Int32,\n Int64,\n UInt8,\n UInt16,\n UInt32,\n UInt64,\n INT8,\n INT16,\n INT32,\n INT64,\n UINT8,\n UINT16,\n UINT32,\n UINT64,\n Object,\n String,\n STRING,\n covariant=True,\n )\n\nSchema = TypeVar(\"Schema\", bound=\"SchemaModel\") # type: ignore\n\n\n# pylint:disable=invalid-name\nif TYPE_CHECKING:\n T = TypeVar(\"T\") # pragma: no cover\nelse:\n T = Schema\n\n\nclass DataFrameBase(Generic[T]):\n # pylint: disable=too-few-public-methods\n \"\"\"\n Pandera Dataframe base class for validating dataframes on\n initialization.\n \"\"\"\n\n default_dtype: Optional[Type] = None\n\n def __setattr__(self, name: str, value: Any) -> None:\n # pylint: disable=no-member\n object.__setattr__(self, name, value)\n if 
name == \"__orig_class__\":\n orig_class = getattr(self, \"__orig_class__\")\n class_args = getattr(orig_class, \"__args__\", None)\n if class_args is not None and any(\n x.__name__ == \"SchemaModel\"\n for x in inspect.getmro(class_args[0])\n ):\n schema_model = value.__args__[0]\n\n # prevent the double validation problem by preventing checks for\n # dataframes with a defined pandera.schema\n pandera_accessor = getattr(self, \"pandera\")\n if (\n pandera_accessor.schema is None\n or pandera_accessor.schema != schema_model.to_schema()\n ):\n pandera_accessor.add_schema(schema_model.to_schema())\n self.__dict__ = schema_model.validate(self).__dict__\n\n\n# pylint:disable=too-few-public-methods\nclass SeriesBase(Generic[GenericDtype]):\n \"\"\"Pandera Series base class to use for all pandas-like APIs.\"\"\"\n\n default_dtype: Optional[Type] = None\n\n def __get__(\n self, instance: object, owner: Type\n ) -> str: # pragma: no cover\n raise AttributeError(\"Series should resolve to Field-s\")\n\n\n# pylint:disable=too-few-public-methods\nclass IndexBase(Generic[GenericDtype]):\n \"\"\"Representation of pandas.Index, only used for type annotation.\n\n *new in 0.5.0*\n \"\"\"\n\n default_dtype: Optional[Type] = None\n\n def __get__(\n self, instance: object, owner: Type\n ) -> str: # pragma: no cover\n raise AttributeError(\"Indexes should resolve to pa.Index-s\")\n\n\nclass AnnotationInfo: # pylint:disable=too-few-public-methods\n \"\"\"Captures extra information about an annotation.\n\n Attributes:\n origin: The non-parameterized generic class.\n arg: The first generic type (SchemaModel does not support more than\n 1 argument).\n literal: Whether the annotation is a literal.\n optional: Whether the annotation is optional.\n raw_annotation: The raw annotation.\n metadata: Extra arguments passed to :data:`typing.Annotated`.\n \"\"\"\n\n def __init__(self, raw_annotation: Type) -> None:\n self._parse_annotation(raw_annotation)\n\n @property\n def is_generic_df(self) -> bool:\n \"\"\"True if the annotation is a DataFrameBase subclass.\"\"\"\n try:\n if self.origin is None:\n return False\n return issubclass(self.origin, DataFrameBase)\n except TypeError:\n return False\n\n def _parse_annotation(self, raw_annotation: Type) -> None:\n \"\"\"Parse key information from annotation.\n\n :param annotation: A subscripted type.\n :returns: Annotation\n \"\"\"\n self.raw_annotation = raw_annotation\n self.origin = self.arg = None\n\n self.optional = typing_inspect.is_optional_type(raw_annotation)\n if self.optional and typing_inspect.is_union_type(raw_annotation):\n # Annotated with Optional or Union[..., NoneType]\n # get_args -> (pandera.typing.Index[str], <class 'NoneType'>)\n raw_annotation = typing_inspect.get_args(raw_annotation)[0]\n\n self.origin = typing_inspect.get_origin(raw_annotation)\n # Replace empty tuple returned from get_args by None\n args = typing_inspect.get_args(raw_annotation) or None\n self.arg = args[0] if args else args\n\n self.metadata = getattr(self.arg, \"__metadata__\", None)\n if self.metadata:\n self.arg = typing_inspect.get_args(self.arg)[0]\n\n self.literal = typing_inspect.is_literal_type(self.arg)\n if self.literal:\n self.arg = typing_inspect.get_args(self.arg)[0]\n\n self.default_dtype = getattr(raw_annotation, \"default_dtype\", None)\n", "path": "pandera/typing/common.py"}]} | 3,442 | 280 |
gh_patches_debug_66775 | rasdani/github-patches | git_diff | apache__airflow-1296 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Inconsistent 'owner' field in examples
Dear Airflow Maintainers,
### Environment
- Version of Airflow (e.g. a release version, running your own fork, running off master -- provide a git log snippet): **1.7.0**
- Screen shots of your DAG's graph and tree views:

- Operating System: (Windows Version or `$ uname -a`) **Ubuntu 14.04**
- Python Version: `$ python --version` **2.7**
### Description of Issue
- What did you expect to happen? **All of the examples have a consistent owner, probably 'airflow'**
- What happened instead? **[Some](https://github.com/airbnb/airflow/blob/master/airflow/example_dags/example_python_operator.py) examples have `airflow`, [some](https://github.com/airbnb/airflow/blob/master/airflow/example_dags/example_passing_params_via_test_command.py) have `me`**
### Reproduction Steps
1. install airflow 1.7.0 via pip
2. start the webserver
3. look at the web UI, probably http://localhost:8080
**Inconsistent hard-coding in the examples will likely lead to confusion for new users.**
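*Editor's note (not part of the original report):* the reference patch further down this record settles the question by standardising on `airflow` as the owner. A minimal sketch of what a consistent example DAG then looks like, assuming the same Airflow 1.7-era API as the file below:

```python
# Sketch only: mirrors airflow/example_dags/example_trigger_controller_dag.py
# with the owner the reference patch standardises on.
from datetime import datetime

from airflow import DAG

dag = DAG(
    dag_id="example_trigger_controller_dag",
    default_args={"owner": "airflow",            # consistent across bundled examples
                  "start_date": datetime.now()},
    schedule_interval="@once",
)
```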
</issue>
<code>
[start of airflow/example_dags/example_trigger_controller_dag.py]
1
2 """This example illustrates the use of the TriggerDagRunOperator. There are 2
3 entities at work in this scenario:
4 1. The Controller DAG - the DAG that conditionally executes the trigger
5 2. The Target DAG - DAG being triggered (in example_trigger_target_dag.py)
6
7 This example illustrates the following features :
8 1. A TriggerDagRunOperator that takes:
9 a. A python callable that decides whether or not to trigger the Target DAG
10 b. An optional params dict passed to the python callable to help in
11 evaluating whether or not to trigger the Target DAG
12 c. The id (name) of the Target DAG
13 d. The python callable can add contextual info to the DagRun created by
14 way of adding a Pickleable payload (e.g. dictionary of primitives). This
15 state is then made available to the TargetDag
16 2. A Target DAG : c.f. example_trigger_target_dag.py
17 """
18
19 from airflow import DAG
20 from airflow.operators import TriggerDagRunOperator
21 from datetime import datetime
22
23 import pprint
24
25 pp = pprint.PrettyPrinter(indent=4)
26
27
28 def conditionally_trigger(context, dag_run_obj):
29 """This function decides whether or not to Trigger the remote DAG"""
30 c_p =context['params']['condition_param']
31 print("Controller DAG : conditionally_trigger = {}".format(c_p))
32 if context['params']['condition_param']:
33 dag_run_obj.payload = {'message': context['params']['message']}
34 pp.pprint(dag_run_obj.payload)
35 return dag_run_obj
36
37
38 # Define the DAG
39 dag = DAG(dag_id='example_trigger_controller_dag',
40 default_args={"owner": "me",
41 "start_date": datetime.now()},
42 schedule_interval='@once')
43
44
45 # Define the single task in this controller example DAG
46 trigger = TriggerDagRunOperator(task_id='test_trigger_dagrun',
47 trigger_dag_id="example_trigger_target_dag",
48 python_callable=conditionally_trigger,
49 params={'condition_param': True,
50 'message': 'Hello World'},
51 dag=dag)
52
[end of airflow/example_dags/example_trigger_controller_dag.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/airflow/example_dags/example_trigger_controller_dag.py b/airflow/example_dags/example_trigger_controller_dag.py
--- a/airflow/example_dags/example_trigger_controller_dag.py
+++ b/airflow/example_dags/example_trigger_controller_dag.py
@@ -37,7 +37,7 @@
# Define the DAG
dag = DAG(dag_id='example_trigger_controller_dag',
- default_args={"owner": "me",
+ default_args={"owner": "airflow",
"start_date": datetime.now()},
schedule_interval='@once')
| {"golden_diff": "diff --git a/airflow/example_dags/example_trigger_controller_dag.py b/airflow/example_dags/example_trigger_controller_dag.py\n--- a/airflow/example_dags/example_trigger_controller_dag.py\n+++ b/airflow/example_dags/example_trigger_controller_dag.py\n@@ -37,7 +37,7 @@\n \n # Define the DAG\n dag = DAG(dag_id='example_trigger_controller_dag',\n- default_args={\"owner\": \"me\",\n+ default_args={\"owner\": \"airflow\",\n \"start_date\": datetime.now()},\n schedule_interval='@once')\n", "issue": "Inconsistent 'owner' field in examples\nDear Airflow Maintainers,\n### Environment\n- Version of Airflow (e.g. a release version, running your own fork, running off master -- provide a git log snippet): **1.7.0**\n- Screen shots of your DAG's graph and tree views:\n \n- Operating System: (Windows Version or `$ uname -a`) **Ubuntu 14.04**\n- Python Version: `$ python --version` **2.7**\n### Description of Issue\n- What did you expect to happen? **All of the examples have a consistent owner, probably 'airflow'**\n- What happened instead? **[Some](https://github.com/airbnb/airflow/blob/master/airflow/example_dags/example_python_operator.py) examples have `airflow`, [some](https://github.com/airbnb/airflow/blob/master/airflow/example_dags/example_passing_params_via_test_command.py) have `me`**\n### Reproduction Steps\n1. install airflow 1.7.0 via pip\n2. start the webserver\n3. look at the web UI, probably http://localhost:8080\n\n**Inconsistent hard-coding in the examples will likely lead to confusion for new users.**\n\n", "before_files": [{"content": "\n\"\"\"This example illustrates the use of the TriggerDagRunOperator. There are 2\nentities at work in this scenario:\n1. The Controller DAG - the DAG that conditionally executes the trigger\n2. The Target DAG - DAG being triggered (in example_trigger_target_dag.py)\n\nThis example illustrates the following features :\n1. A TriggerDagRunOperator that takes:\n a. A python callable that decides whether or not to trigger the Target DAG\n b. An optional params dict passed to the python callable to help in\n evaluating whether or not to trigger the Target DAG\n c. The id (name) of the Target DAG\n d. The python callable can add contextual info to the DagRun created by\n way of adding a Pickleable payload (e.g. dictionary of primitives). This\n state is then made available to the TargetDag\n2. A Target DAG : c.f. 
example_trigger_target_dag.py\n\"\"\"\n\nfrom airflow import DAG\nfrom airflow.operators import TriggerDagRunOperator\nfrom datetime import datetime\n\nimport pprint\n\npp = pprint.PrettyPrinter(indent=4)\n\n\ndef conditionally_trigger(context, dag_run_obj):\n \"\"\"This function decides whether or not to Trigger the remote DAG\"\"\"\n c_p =context['params']['condition_param']\n print(\"Controller DAG : conditionally_trigger = {}\".format(c_p))\n if context['params']['condition_param']:\n dag_run_obj.payload = {'message': context['params']['message']}\n pp.pprint(dag_run_obj.payload)\n return dag_run_obj\n\n\n# Define the DAG\ndag = DAG(dag_id='example_trigger_controller_dag',\n default_args={\"owner\": \"me\",\n \"start_date\": datetime.now()},\n schedule_interval='@once')\n\n\n# Define the single task in this controller example DAG\ntrigger = TriggerDagRunOperator(task_id='test_trigger_dagrun',\n trigger_dag_id=\"example_trigger_target_dag\",\n python_callable=conditionally_trigger,\n params={'condition_param': True,\n 'message': 'Hello World'},\n dag=dag)\n", "path": "airflow/example_dags/example_trigger_controller_dag.py"}]} | 1,371 | 127 |
gh_patches_debug_37830 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-2969 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG]: Error when running the ChatGPT inference example (运行chatgpt推理示例报错)
### 🐛 Describe the bug
Running inference.py under (https://github.com/hpcaitech/ColossalAI/tree/main/applications/ChatGPT)/examples/ raises an OSError:

### Environment
_No response_
[tensor] fix some unittests
</issue>
<code>
[start of applications/ChatGPT/examples/inference.py]
1 import argparse
2 import torch
3
4 from chatgpt.nn import BLOOMActor, GPTActor, OPTActor
5 from transformers import AutoTokenizer
6 from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
7
8
9 def eval(args):
10 # configure model
11 if args.model == 'gpt2':
12 model = GPTActor(pretrained=args.pretrain).to(torch.cuda.current_device())
13 elif args.model == 'bloom':
14 model = BLOOMActor(pretrained=args.pretrain).to(torch.cuda.current_device())
15 elif args.model == 'opt':
16 model = OPTActor(pretrained=args.pretrain).to(torch.cuda.current_device())
17 else:
18 raise ValueError(f'Unsupported model "{args.model}"')
19
20 # configure tokenizer
21 if args.model == 'gpt2':
22 tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
23 tokenizer.pad_token = tokenizer.eos_token
24 elif args.model == 'bloom':
25 tokenizer = AutoTokenizer.from_pretrained(args.pretrain)
26 tokenizer.pad_token = tokenizer.eos_token
27 elif args.model == 'opt':
28 tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
29 else:
30 raise ValueError(f'Unsupported model "{args.model}"')
31
32 model.eval()
33 input = args.input
34 input_ids = tokenizer.encode(input, return_tensors='pt').to(torch.cuda.current_device())
35 outputs = model.generate(input_ids,
36 max_length=args.max_length,
37 do_sample=True,
38 top_k=50,
39 top_p=0.95,
40 num_return_sequences=1)
41 output = tokenizer.batch_decode(outputs[0], skip_special_tokens=True)
42 print(output)
43
44
45 if __name__ == '__main__':
46 parser = argparse.ArgumentParser()
47 parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt'])
48 parser.add_argument('--pretrain', type=str, default=None)
49 parser.add_argument('--input', type=str, default='Q: How are you ? A:')
50 parser.add_argument('--max_length', type=int, default=100)
51 args = parser.parse_args()
52 eval(args)
53
[end of applications/ChatGPT/examples/inference.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/applications/ChatGPT/examples/inference.py b/applications/ChatGPT/examples/inference.py
--- a/applications/ChatGPT/examples/inference.py
+++ b/applications/ChatGPT/examples/inference.py
@@ -9,30 +9,34 @@
def eval(args):
# configure model
if args.model == 'gpt2':
- model = GPTActor(pretrained=args.pretrain).to(torch.cuda.current_device())
+ actor = GPTActor().to(torch.cuda.current_device())
elif args.model == 'bloom':
- model = BLOOMActor(pretrained=args.pretrain).to(torch.cuda.current_device())
+ actor = BLOOMActor().to(torch.cuda.current_device())
elif args.model == 'opt':
- model = OPTActor(pretrained=args.pretrain).to(torch.cuda.current_device())
+ actor = OPTActor().to(torch.cuda.current_device())
else:
raise ValueError(f'Unsupported model "{args.model}"')
+ state_dict = torch.load(args.pretrain)
+ actor.model.load_state_dict(state_dict)
+
+
# configure tokenizer
if args.model == 'gpt2':
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
tokenizer.pad_token = tokenizer.eos_token
elif args.model == 'bloom':
- tokenizer = AutoTokenizer.from_pretrained(args.pretrain)
+ tokenizer = AutoTokenizer.from_pretrained('bigscience/bloom-560m')
tokenizer.pad_token = tokenizer.eos_token
elif args.model == 'opt':
- tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
+ tokenizer = AutoTokenizer.from_pretrained('facebook/opt-350m')
else:
raise ValueError(f'Unsupported model "{args.model}"')
- model.eval()
+ actor.eval()
input = args.input
input_ids = tokenizer.encode(input, return_tensors='pt').to(torch.cuda.current_device())
- outputs = model.generate(input_ids,
+ outputs = actor.generate(input_ids,
max_length=args.max_length,
do_sample=True,
top_k=50,
@@ -46,7 +50,7 @@
parser = argparse.ArgumentParser()
parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt'])
parser.add_argument('--pretrain', type=str, default=None)
- parser.add_argument('--input', type=str, default='Q: How are you ? A:')
+ parser.add_argument('--input', type=str, default='Question: How are you ? Answer:')
parser.add_argument('--max_length', type=int, default=100)
args = parser.parse_args()
eval(args)
| {"golden_diff": "diff --git a/applications/ChatGPT/examples/inference.py b/applications/ChatGPT/examples/inference.py\n--- a/applications/ChatGPT/examples/inference.py\n+++ b/applications/ChatGPT/examples/inference.py\n@@ -9,30 +9,34 @@\n def eval(args):\n # configure model\n if args.model == 'gpt2':\n- model = GPTActor(pretrained=args.pretrain).to(torch.cuda.current_device())\n+ actor = GPTActor().to(torch.cuda.current_device())\n elif args.model == 'bloom':\n- model = BLOOMActor(pretrained=args.pretrain).to(torch.cuda.current_device())\n+ actor = BLOOMActor().to(torch.cuda.current_device())\n elif args.model == 'opt':\n- model = OPTActor(pretrained=args.pretrain).to(torch.cuda.current_device())\n+ actor = OPTActor().to(torch.cuda.current_device())\n else:\n raise ValueError(f'Unsupported model \"{args.model}\"')\n \n+ state_dict = torch.load(args.pretrain)\n+ actor.model.load_state_dict(state_dict)\n+ \n+ \n # configure tokenizer\n if args.model == 'gpt2':\n tokenizer = GPT2Tokenizer.from_pretrained('gpt2')\n tokenizer.pad_token = tokenizer.eos_token\n elif args.model == 'bloom':\n- tokenizer = AutoTokenizer.from_pretrained(args.pretrain)\n+ tokenizer = AutoTokenizer.from_pretrained('bigscience/bloom-560m')\n tokenizer.pad_token = tokenizer.eos_token\n elif args.model == 'opt':\n- tokenizer = AutoTokenizer.from_pretrained(\"facebook/opt-350m\")\n+ tokenizer = AutoTokenizer.from_pretrained('facebook/opt-350m')\n else:\n raise ValueError(f'Unsupported model \"{args.model}\"')\n \n- model.eval()\n+ actor.eval()\n input = args.input\n input_ids = tokenizer.encode(input, return_tensors='pt').to(torch.cuda.current_device())\n- outputs = model.generate(input_ids,\n+ outputs = actor.generate(input_ids,\n max_length=args.max_length,\n do_sample=True,\n top_k=50,\n@@ -46,7 +50,7 @@\n parser = argparse.ArgumentParser()\n parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt'])\n parser.add_argument('--pretrain', type=str, default=None)\n- parser.add_argument('--input', type=str, default='Q: How are you ? A:')\n+ parser.add_argument('--input', type=str, default='Question: How are you ? 
Answer:')\n parser.add_argument('--max_length', type=int, default=100)\n args = parser.parse_args()\n eval(args)\n", "issue": "[BUG]: \u8fd0\u884cchatgpt\u63a8\u7406\u793a\u4f8b\u62a5\u9519\n### \ud83d\udc1b Describe the bug\n\n(https://github.com/hpcaitech/ColossalAI/tree/main/applications/ChatGPT)/examples/ \u8fd0\u884cinference.py \u629b\u51faOSError:\r\n\r\n\n\n### Environment\n\n_No response_\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "import argparse\nimport torch\n\nfrom chatgpt.nn import BLOOMActor, GPTActor, OPTActor\nfrom transformers import AutoTokenizer\nfrom transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer\n\n\ndef eval(args):\n # configure model\n if args.model == 'gpt2':\n model = GPTActor(pretrained=args.pretrain).to(torch.cuda.current_device())\n elif args.model == 'bloom':\n model = BLOOMActor(pretrained=args.pretrain).to(torch.cuda.current_device())\n elif args.model == 'opt':\n model = OPTActor(pretrained=args.pretrain).to(torch.cuda.current_device())\n else:\n raise ValueError(f'Unsupported model \"{args.model}\"')\n\n # configure tokenizer\n if args.model == 'gpt2':\n tokenizer = GPT2Tokenizer.from_pretrained('gpt2')\n tokenizer.pad_token = tokenizer.eos_token\n elif args.model == 'bloom':\n tokenizer = AutoTokenizer.from_pretrained(args.pretrain)\n tokenizer.pad_token = tokenizer.eos_token\n elif args.model == 'opt':\n tokenizer = AutoTokenizer.from_pretrained(\"facebook/opt-350m\")\n else:\n raise ValueError(f'Unsupported model \"{args.model}\"')\n\n model.eval()\n input = args.input\n input_ids = tokenizer.encode(input, return_tensors='pt').to(torch.cuda.current_device())\n outputs = model.generate(input_ids,\n max_length=args.max_length,\n do_sample=True,\n top_k=50,\n top_p=0.95,\n num_return_sequences=1)\n output = tokenizer.batch_decode(outputs[0], skip_special_tokens=True)\n print(output)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt'])\n parser.add_argument('--pretrain', type=str, default=None)\n parser.add_argument('--input', type=str, default='Q: How are you ? A:')\n parser.add_argument('--max_length', type=int, default=100)\n args = parser.parse_args()\n eval(args)\n", "path": "applications/ChatGPT/examples/inference.py"}]} | 1,250 | 592 |
gh_patches_debug_15935 | rasdani/github-patches | git_diff | vispy__vispy-305 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The first emitted Timer event has `None` as `dt` property
``` python
def on_timer(self, event):
print event.dt
```
displays `None` the first time, and the correct dt then (a float). The first dt should probably be `0.0`.
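*Editor's note (not part of the original report):* until that change lands, a handler can guard against the `None` described above; a minimal sketch of the user-side workaround (the library-side fix appears in the patch later in this record):

```python
# Sketch only: treat the missing dt on the very first timeout as zero.
def on_timer(self, event):
    dt = event.dt if event.dt is not None else 0.0
    print(dt)
```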
</issue>
<code>
[start of vispy/app/timer.py]
1 # -*- coding: utf-8 -*-
2 # Copyright (c) 2014, Vispy Development Team.
3 # Distributed under the (new) BSD License. See LICENSE.txt for more info.
4
5 from __future__ import division
6
7 from ..util.event import Event, EmitterGroup
8 from ..util.ptime import time as precision_time
9 from ..ext.six import string_types
10 from .base import BaseTimerBackend as TimerBackend # noqa
11 from . import use_app, Application
12
13
14 class Timer(object):
15
16 """Timer used to schedule events in the future or on a repeating schedule
17
18 Parameters
19 ----------
20 interval : float
21 Time between events.
22 connect : function | None
23 The function to call.
24 iterations : int
25 Number of iterations. Can be -1 for infinite.
26 start : bool
27 Whether to start the timer.
28 app : instance of vispy.app.Application
29 The application to attach the timer to.
30 """
31
32 def __init__(self, interval=0.0, connect=None, iterations=-1, start=False,
33 app=None):
34 self.events = EmitterGroup(source=self,
35 start=Event,
36 stop=Event,
37 timeout=Event)
38 #self.connect = self.events.timeout.connect
39 #self.disconnect = self.events.timeout.disconnect
40
41 # Get app instance
42 if app is None:
43 self._app = use_app()
44 elif isinstance(app, Application):
45 self._app = app
46 elif isinstance(app, string_types):
47 self._app = Application(app)
48 else:
49 raise ValueError('Invalid value for app %r' % app)
50
51 # Ensure app has backend app object
52 self._app.native
53
54 # Instantiate the backed with the right class
55 self._backend = self._app.backend_module.TimerBackend(self)
56
57 self._interval = interval
58 self._running = False
59 self._last_emit_time = None
60 self.iter_count = 0
61 self.max_iterations = iterations
62 if connect is not None:
63 self.connect(connect)
64 if start:
65 self.start()
66
67 @property
68 def app(self):
69 """ The vispy Application instance on which this Timer is based.
70 """
71 return self._app
72
73 @property
74 def interval(self):
75 return self._interval
76
77 @interval.setter
78 def interval(self, val):
79 self._interval = val
80 if self.running:
81 self.stop()
82 self.start()
83
84 @property
85 def running(self):
86 return self._running
87
88 def start(self, interval=None, iterations=None):
89 """Start the timer.
90
91 A timeout event will be generated every *interval* seconds.
92 If *interval* is None, then self.interval will be used.
93
94 If *iterations* is specified, the timer will stop after
95 emitting that number of events. If unspecified, then
96 the previous value of self.iterations will be used. If the value is
97 negative, then the timer will continue running until stop() is called.
98 """
99 self.iter_count = 0
100 if interval is not None:
101 self.interval = interval
102 if iterations is not None:
103 self.max_iterations = iterations
104 self._backend._vispy_start(self.interval)
105 self._running = True
106 self._last_emit_time = None
107 self.events.start(type='timer_start')
108
109 def stop(self):
110 """Stop the timer."""
111 self._backend._vispy_stop()
112 self._running = False
113 self.events.stop(type='timer_stop')
114
115 # use timer.app.run() and .quit() instead.
116 # def run_event_loop(self):
117 #"""Execute the event loop for this Timer's backend.
118 #"""
119 # return self._backend._vispy_run()
120
121 # def quit_event_loop(self):
122 #"""Exit the event loop for this Timer's backend.
123 #"""
124 # return self._backend._vispy_quit()
125
126 @property
127 def native(self):
128 """ The native timer on which this Timer is based.
129 """
130 return self._backend._vispy_get_native_timer()
131
132 def _timeout(self, *args):
133 # called when the backend timer has triggered.
134 if not self.running:
135 return
136 if self.max_iterations >= 0 and self.iter_count >= self.max_iterations:
137 self.stop()
138 return
139
140 # compute dt since last event
141 now = precision_time()
142 if self._last_emit_time is None:
143 dt = None
144 else:
145 dt = now - self._last_emit_time
146 self._last_emit_time = now
147
148 self.events.timeout(
149 type='timer_timeout',
150 iteration=self.iter_count,
151 dt=dt)
152 self.iter_count += 1
153
154 def connect(self, callback):
155 """ Alias for self.events.timeout.connect() """
156 return self.events.timeout.connect(callback)
157
158 def disconnect(self, callback=None):
159 """ Alias for self.events.timeout.disconnect() """
160 return self.events.timeout.disconnect(callback)
161
[end of vispy/app/timer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/vispy/app/timer.py b/vispy/app/timer.py
--- a/vispy/app/timer.py
+++ b/vispy/app/timer.py
@@ -103,7 +103,7 @@
self.max_iterations = iterations
self._backend._vispy_start(self.interval)
self._running = True
- self._last_emit_time = None
+ self._last_emit_time = precision_time()
self.events.start(type='timer_start')
def stop(self):
@@ -139,10 +139,7 @@
# compute dt since last event
now = precision_time()
- if self._last_emit_time is None:
- dt = None
- else:
- dt = now - self._last_emit_time
+ dt = now - self._last_emit_time
self._last_emit_time = now
self.events.timeout(
| {"golden_diff": "diff --git a/vispy/app/timer.py b/vispy/app/timer.py\n--- a/vispy/app/timer.py\n+++ b/vispy/app/timer.py\n@@ -103,7 +103,7 @@\n self.max_iterations = iterations\n self._backend._vispy_start(self.interval)\n self._running = True\n- self._last_emit_time = None\n+ self._last_emit_time = precision_time()\n self.events.start(type='timer_start')\n \n def stop(self):\n@@ -139,10 +139,7 @@\n \n # compute dt since last event\n now = precision_time()\n- if self._last_emit_time is None:\n- dt = None\n- else:\n- dt = now - self._last_emit_time\n+ dt = now - self._last_emit_time\n self._last_emit_time = now\n \n self.events.timeout(\n", "issue": "The first emitted Timer event has `None` as `dt` property\n``` python\ndef on_timer(self, event):\n print event.dt\n```\n\ndisplays `None` the first time, and the correct dt then (a float). The first dt should probably be `0.0`.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2014, Vispy Development Team.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n\nfrom __future__ import division\n\nfrom ..util.event import Event, EmitterGroup\nfrom ..util.ptime import time as precision_time\nfrom ..ext.six import string_types\nfrom .base import BaseTimerBackend as TimerBackend # noqa\nfrom . import use_app, Application\n\n\nclass Timer(object):\n\n \"\"\"Timer used to schedule events in the future or on a repeating schedule\n\n Parameters\n ----------\n interval : float\n Time between events.\n connect : function | None\n The function to call.\n iterations : int\n Number of iterations. Can be -1 for infinite.\n start : bool\n Whether to start the timer.\n app : instance of vispy.app.Application\n The application to attach the timer to.\n \"\"\"\n\n def __init__(self, interval=0.0, connect=None, iterations=-1, start=False,\n app=None):\n self.events = EmitterGroup(source=self,\n start=Event,\n stop=Event,\n timeout=Event)\n #self.connect = self.events.timeout.connect\n #self.disconnect = self.events.timeout.disconnect\n\n # Get app instance\n if app is None:\n self._app = use_app()\n elif isinstance(app, Application):\n self._app = app\n elif isinstance(app, string_types):\n self._app = Application(app)\n else:\n raise ValueError('Invalid value for app %r' % app)\n \n # Ensure app has backend app object\n self._app.native\n \n # Instantiate the backed with the right class\n self._backend = self._app.backend_module.TimerBackend(self)\n\n self._interval = interval\n self._running = False\n self._last_emit_time = None\n self.iter_count = 0\n self.max_iterations = iterations\n if connect is not None:\n self.connect(connect)\n if start:\n self.start()\n\n @property\n def app(self):\n \"\"\" The vispy Application instance on which this Timer is based.\n \"\"\"\n return self._app\n\n @property\n def interval(self):\n return self._interval\n\n @interval.setter\n def interval(self, val):\n self._interval = val\n if self.running:\n self.stop()\n self.start()\n\n @property\n def running(self):\n return self._running\n\n def start(self, interval=None, iterations=None):\n \"\"\"Start the timer.\n\n A timeout event will be generated every *interval* seconds.\n If *interval* is None, then self.interval will be used.\n\n If *iterations* is specified, the timer will stop after\n emitting that number of events. If unspecified, then\n the previous value of self.iterations will be used. 
If the value is\n negative, then the timer will continue running until stop() is called.\n \"\"\"\n self.iter_count = 0\n if interval is not None:\n self.interval = interval\n if iterations is not None:\n self.max_iterations = iterations\n self._backend._vispy_start(self.interval)\n self._running = True\n self._last_emit_time = None\n self.events.start(type='timer_start')\n\n def stop(self):\n \"\"\"Stop the timer.\"\"\"\n self._backend._vispy_stop()\n self._running = False\n self.events.stop(type='timer_stop')\n\n # use timer.app.run() and .quit() instead.\n # def run_event_loop(self):\n #\"\"\"Execute the event loop for this Timer's backend.\n #\"\"\"\n # return self._backend._vispy_run()\n\n # def quit_event_loop(self):\n #\"\"\"Exit the event loop for this Timer's backend.\n #\"\"\"\n # return self._backend._vispy_quit()\n\n @property\n def native(self):\n \"\"\" The native timer on which this Timer is based.\n \"\"\"\n return self._backend._vispy_get_native_timer()\n\n def _timeout(self, *args):\n # called when the backend timer has triggered.\n if not self.running:\n return\n if self.max_iterations >= 0 and self.iter_count >= self.max_iterations:\n self.stop()\n return\n\n # compute dt since last event\n now = precision_time()\n if self._last_emit_time is None:\n dt = None\n else:\n dt = now - self._last_emit_time\n self._last_emit_time = now\n\n self.events.timeout(\n type='timer_timeout',\n iteration=self.iter_count,\n dt=dt)\n self.iter_count += 1\n\n def connect(self, callback):\n \"\"\" Alias for self.events.timeout.connect() \"\"\"\n return self.events.timeout.connect(callback)\n\n def disconnect(self, callback=None):\n \"\"\" Alias for self.events.timeout.disconnect() \"\"\"\n return self.events.timeout.disconnect(callback)\n", "path": "vispy/app/timer.py"}]} | 2,044 | 204 |
gh_patches_debug_24108 | rasdani/github-patches | git_diff | pypa__pip-11264 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Checking out Bazaar branch makes full clone
When checking out a Bazaar branch, pip currently makes a full clone of the branch history. This is unnecessary and much slower than just fetching the latest revision:
For example, performance on my system for 'bzr co --lightweight lp:bzr':
0.60s user 0.11s system 5% cpu 12.234 total
Performance on my system for 'bzr branch lp:bzr':
65.41s user 1.48s system 39% cpu 2:47.91 total
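*Editor's note (not part of the original report):* the two commands being timed above, written out as plain subprocess calls for anyone reproducing the numbers (destination directories are hypothetical):

```python
# Sketch only: lightweight checkout vs. full branch of lp:bzr.
import subprocess

subprocess.run(["bzr", "checkout", "--lightweight", "lp:bzr", "bzr-light"], check=True)  # seconds
# subprocess.run(["bzr", "branch", "lp:bzr", "bzr-full"], check=True)                    # minutes: full history
```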
</issue>
<code>
[start of src/pip/_internal/vcs/bazaar.py]
1 import logging
2 from typing import List, Optional, Tuple
3
4 from pip._internal.utils.misc import HiddenText, display_path
5 from pip._internal.utils.subprocess import make_command
6 from pip._internal.utils.urls import path_to_url
7 from pip._internal.vcs.versioncontrol import (
8 AuthInfo,
9 RemoteNotFoundError,
10 RevOptions,
11 VersionControl,
12 vcs,
13 )
14
15 logger = logging.getLogger(__name__)
16
17
18 class Bazaar(VersionControl):
19 name = "bzr"
20 dirname = ".bzr"
21 repo_name = "branch"
22 schemes = (
23 "bzr+http",
24 "bzr+https",
25 "bzr+ssh",
26 "bzr+sftp",
27 "bzr+ftp",
28 "bzr+lp",
29 "bzr+file",
30 )
31
32 @staticmethod
33 def get_base_rev_args(rev: str) -> List[str]:
34 return ["-r", rev]
35
36 def fetch_new(
37 self, dest: str, url: HiddenText, rev_options: RevOptions, verbosity: int
38 ) -> None:
39 rev_display = rev_options.to_display()
40 logger.info(
41 "Checking out %s%s to %s",
42 url,
43 rev_display,
44 display_path(dest),
45 )
46 if verbosity <= 0:
47 flag = "--quiet"
48 elif verbosity == 1:
49 flag = ""
50 else:
51 flag = f"-{'v'*verbosity}"
52 cmd_args = make_command("branch", flag, rev_options.to_args(), url, dest)
53 self.run_command(cmd_args)
54
55 def switch(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:
56 self.run_command(make_command("switch", url), cwd=dest)
57
58 def update(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:
59 cmd_args = make_command("pull", "-q", rev_options.to_args())
60 self.run_command(cmd_args, cwd=dest)
61
62 @classmethod
63 def get_url_rev_and_auth(cls, url: str) -> Tuple[str, Optional[str], AuthInfo]:
64 # hotfix the URL scheme after removing bzr+ from bzr+ssh:// readd it
65 url, rev, user_pass = super().get_url_rev_and_auth(url)
66 if url.startswith("ssh://"):
67 url = "bzr+" + url
68 return url, rev, user_pass
69
70 @classmethod
71 def get_remote_url(cls, location: str) -> str:
72 urls = cls.run_command(
73 ["info"], show_stdout=False, stdout_only=True, cwd=location
74 )
75 for line in urls.splitlines():
76 line = line.strip()
77 for x in ("checkout of branch: ", "parent branch: "):
78 if line.startswith(x):
79 repo = line.split(x)[1]
80 if cls._is_local_repository(repo):
81 return path_to_url(repo)
82 return repo
83 raise RemoteNotFoundError
84
85 @classmethod
86 def get_revision(cls, location: str) -> str:
87 revision = cls.run_command(
88 ["revno"],
89 show_stdout=False,
90 stdout_only=True,
91 cwd=location,
92 )
93 return revision.splitlines()[-1]
94
95 @classmethod
96 def is_commit_id_equal(cls, dest: str, name: Optional[str]) -> bool:
97 """Always assume the versions don't match"""
98 return False
99
100
101 vcs.register(Bazaar)
102
[end of src/pip/_internal/vcs/bazaar.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/pip/_internal/vcs/bazaar.py b/src/pip/_internal/vcs/bazaar.py
--- a/src/pip/_internal/vcs/bazaar.py
+++ b/src/pip/_internal/vcs/bazaar.py
@@ -49,14 +49,25 @@
flag = ""
else:
flag = f"-{'v'*verbosity}"
- cmd_args = make_command("branch", flag, rev_options.to_args(), url, dest)
+ cmd_args = make_command(
+ "checkout", "--lightweight", flag, rev_options.to_args(), url, dest
+ )
self.run_command(cmd_args)
def switch(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:
self.run_command(make_command("switch", url), cwd=dest)
def update(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:
- cmd_args = make_command("pull", "-q", rev_options.to_args())
+ output = self.run_command(
+ make_command("info"), show_stdout=False, stdout_only=True, cwd=dest
+ )
+ if output.startswith("Standalone "):
+ # Older versions of pip used to create standalone branches.
+ # Convert the standalone branch to a checkout by calling "bzr bind".
+ cmd_args = make_command("bind", "-q", url)
+ self.run_command(cmd_args, cwd=dest)
+
+ cmd_args = make_command("update", "-q", rev_options.to_args())
self.run_command(cmd_args, cwd=dest)
@classmethod
| {"golden_diff": "diff --git a/src/pip/_internal/vcs/bazaar.py b/src/pip/_internal/vcs/bazaar.py\n--- a/src/pip/_internal/vcs/bazaar.py\n+++ b/src/pip/_internal/vcs/bazaar.py\n@@ -49,14 +49,25 @@\n flag = \"\"\n else:\n flag = f\"-{'v'*verbosity}\"\n- cmd_args = make_command(\"branch\", flag, rev_options.to_args(), url, dest)\n+ cmd_args = make_command(\n+ \"checkout\", \"--lightweight\", flag, rev_options.to_args(), url, dest\n+ )\n self.run_command(cmd_args)\n \n def switch(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:\n self.run_command(make_command(\"switch\", url), cwd=dest)\n \n def update(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:\n- cmd_args = make_command(\"pull\", \"-q\", rev_options.to_args())\n+ output = self.run_command(\n+ make_command(\"info\"), show_stdout=False, stdout_only=True, cwd=dest\n+ )\n+ if output.startswith(\"Standalone \"):\n+ # Older versions of pip used to create standalone branches.\n+ # Convert the standalone branch to a checkout by calling \"bzr bind\".\n+ cmd_args = make_command(\"bind\", \"-q\", url)\n+ self.run_command(cmd_args, cwd=dest)\n+\n+ cmd_args = make_command(\"update\", \"-q\", rev_options.to_args())\n self.run_command(cmd_args, cwd=dest)\n \n @classmethod\n", "issue": "Checking out Bazaar branch makes full clone\nWhen checking out a Bazaar branch, pip currently makes a full clone of the branch history. This is unnecessary and much slower than just fetching the latest revision:\r\n\r\nFor example, performance on my system for 'bzr co --lightweight lp:bzr':\r\n\r\n0.60s user 0.11s system 5% cpu 12.234 total\r\n\r\nPerformance on my system for 'bzr branch lp:bzr':\r\n\r\n65.41s user 1.48s system 39% cpu 2:47.91 total\r\n\n", "before_files": [{"content": "import logging\nfrom typing import List, Optional, Tuple\n\nfrom pip._internal.utils.misc import HiddenText, display_path\nfrom pip._internal.utils.subprocess import make_command\nfrom pip._internal.utils.urls import path_to_url\nfrom pip._internal.vcs.versioncontrol import (\n AuthInfo,\n RemoteNotFoundError,\n RevOptions,\n VersionControl,\n vcs,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nclass Bazaar(VersionControl):\n name = \"bzr\"\n dirname = \".bzr\"\n repo_name = \"branch\"\n schemes = (\n \"bzr+http\",\n \"bzr+https\",\n \"bzr+ssh\",\n \"bzr+sftp\",\n \"bzr+ftp\",\n \"bzr+lp\",\n \"bzr+file\",\n )\n\n @staticmethod\n def get_base_rev_args(rev: str) -> List[str]:\n return [\"-r\", rev]\n\n def fetch_new(\n self, dest: str, url: HiddenText, rev_options: RevOptions, verbosity: int\n ) -> None:\n rev_display = rev_options.to_display()\n logger.info(\n \"Checking out %s%s to %s\",\n url,\n rev_display,\n display_path(dest),\n )\n if verbosity <= 0:\n flag = \"--quiet\"\n elif verbosity == 1:\n flag = \"\"\n else:\n flag = f\"-{'v'*verbosity}\"\n cmd_args = make_command(\"branch\", flag, rev_options.to_args(), url, dest)\n self.run_command(cmd_args)\n\n def switch(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:\n self.run_command(make_command(\"switch\", url), cwd=dest)\n\n def update(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:\n cmd_args = make_command(\"pull\", \"-q\", rev_options.to_args())\n self.run_command(cmd_args, cwd=dest)\n\n @classmethod\n def get_url_rev_and_auth(cls, url: str) -> Tuple[str, Optional[str], AuthInfo]:\n # hotfix the URL scheme after removing bzr+ from bzr+ssh:// readd it\n url, rev, user_pass = super().get_url_rev_and_auth(url)\n if url.startswith(\"ssh://\"):\n url = 
\"bzr+\" + url\n return url, rev, user_pass\n\n @classmethod\n def get_remote_url(cls, location: str) -> str:\n urls = cls.run_command(\n [\"info\"], show_stdout=False, stdout_only=True, cwd=location\n )\n for line in urls.splitlines():\n line = line.strip()\n for x in (\"checkout of branch: \", \"parent branch: \"):\n if line.startswith(x):\n repo = line.split(x)[1]\n if cls._is_local_repository(repo):\n return path_to_url(repo)\n return repo\n raise RemoteNotFoundError\n\n @classmethod\n def get_revision(cls, location: str) -> str:\n revision = cls.run_command(\n [\"revno\"],\n show_stdout=False,\n stdout_only=True,\n cwd=location,\n )\n return revision.splitlines()[-1]\n\n @classmethod\n def is_commit_id_equal(cls, dest: str, name: Optional[str]) -> bool:\n \"\"\"Always assume the versions don't match\"\"\"\n return False\n\n\nvcs.register(Bazaar)\n", "path": "src/pip/_internal/vcs/bazaar.py"}]} | 1,613 | 355 |
gh_patches_debug_5737 | rasdani/github-patches | git_diff | spesmilo__electrum-1738 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Feature request: Label colour - History Window
The transaction amount of outgoing transactions, when viewed in the history window, is coloured red.
It would be nice if the label of these transactions could be coloured the same red colour so that they stand out more against the other incoming transactions.
Not a big issue but would be a 'nice to have'.
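*Editor's note (not part of the original request):* given the headers defined in `history_widget.py` below, the description/label sits in column 3 and the amount in column 4, so the request amounts to reusing the existing red brush for one more column. A sketch, assuming the PyQt4-era API used by this file:

```python
# Sketch only: colour the label column with the same brush already used for
# the outgoing amount.
from PyQt4.QtGui import QBrush, QColor

def colour_outgoing(item, value):
    if value < 0:
        item.setForeground(3, QBrush(QColor("#BC1E1E")))  # Description / label
        item.setForeground(4, QBrush(QColor("#BC1E1E")))  # Amount (already red)
```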
</issue>
<code>
[start of gui/qt/history_widget.py]
1 #!/usr/bin/env python
2 #
3 # Electrum - lightweight Bitcoin client
4 # Copyright (C) 2015 Thomas Voegtlin
5 #
6 # Permission is hereby granted, free of charge, to any person
7 # obtaining a copy of this software and associated documentation files
8 # (the "Software"), to deal in the Software without restriction,
9 # including without limitation the rights to use, copy, modify, merge,
10 # publish, distribute, sublicense, and/or sell copies of the Software,
11 # and to permit persons to whom the Software is furnished to do so,
12 # subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice shall be
15 # included in all copies or substantial portions of the Software.
16 #
17 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
20 # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
21 # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
22 # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
23 # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24 # SOFTWARE.
25
26
27 import webbrowser
28
29 from util import *
30 from electrum.i18n import _
31 from electrum.util import block_explorer_URL, format_satoshis, format_time
32 from electrum.plugins import run_hook
33
34
35 class HistoryWidget(MyTreeWidget):
36
37 def __init__(self, parent=None):
38 MyTreeWidget.__init__(self, parent, self.create_menu, [], 3)
39 self.refresh_headers()
40 self.setColumnHidden(1, True)
41 self.config = self.parent.config
42
43 def refresh_headers(self):
44 headers = ['', '', _('Date'), _('Description') , _('Amount'),
45 _('Balance')]
46 run_hook('history_tab_headers', headers)
47 self.update_headers(headers)
48
49 def get_icon(self, conf, timestamp):
50 time_str = _("unknown")
51 if conf > 0:
52 time_str = format_time(timestamp)
53 if conf == -1:
54 time_str = _('Not Verified')
55 icon = QIcon(":icons/unconfirmed.png")
56 elif conf == 0:
57 time_str = _('Unconfirmed')
58 icon = QIcon(":icons/unconfirmed.png")
59 elif conf < 6:
60 icon = QIcon(":icons/clock%d.png"%conf)
61 else:
62 icon = QIcon(":icons/confirmed.png")
63 return icon, time_str
64
65 def get_domain(self):
66 '''Replaced in address_dialog.py'''
67 return self.wallet.get_account_addresses(self.parent.current_account)
68
69 def on_update(self):
70 self.wallet = self.parent.wallet
71 h = self.wallet.get_history(self.get_domain())
72
73 item = self.currentItem()
74 current_tx = item.data(0, Qt.UserRole).toString() if item else None
75 self.clear()
76 run_hook('history_tab_update_begin')
77 for tx in h:
78 tx_hash, conf, value, timestamp, balance = tx
79 if conf is None and timestamp is None:
80 continue # skip history in offline mode
81 icon, time_str = self.get_icon(conf, timestamp)
82 v_str = self.parent.format_amount(value, True, whitespaces=True)
83 balance_str = self.parent.format_amount(balance, whitespaces=True)
84 label = self.wallet.get_label(tx_hash)
85 entry = ['', tx_hash, time_str, label, v_str, balance_str]
86 run_hook('history_tab_update', tx, entry)
87 item = QTreeWidgetItem(entry)
88 item.setIcon(0, icon)
89 for i in range(len(entry)):
90 if i>3:
91 item.setTextAlignment(i, Qt.AlignRight)
92 if i!=2:
93 item.setFont(i, QFont(MONOSPACE_FONT))
94 if value < 0:
95 item.setForeground(4, QBrush(QColor("#BC1E1E")))
96 if tx_hash:
97 item.setData(0, Qt.UserRole, tx_hash)
98 self.insertTopLevelItem(0, item)
99 if current_tx == tx_hash:
100 self.setCurrentItem(item)
101
102 def update_item(self, tx_hash, conf, timestamp):
103 icon, time_str = self.get_icon(conf, timestamp)
104 items = self.findItems(tx_hash, Qt.UserRole|Qt.MatchContains|Qt.MatchRecursive, column=1)
105 if items:
106 item = items[0]
107 item.setIcon(0, icon)
108 item.setText(2, time_str)
109
110 def create_menu(self, position):
111 self.selectedIndexes()
112 item = self.currentItem()
113 if not item:
114 return
115 tx_hash = str(item.data(0, Qt.UserRole).toString())
116 if not tx_hash:
117 return
118 tx_URL = block_explorer_URL(self.config, 'tx', tx_hash)
119 if not tx_URL:
120 return
121 menu = QMenu()
122 menu.addAction(_("Copy ID to Clipboard"), lambda: self.parent.app.clipboard().setText(tx_hash))
123 menu.addAction(_("Details"), lambda: self.parent.show_transaction(self.wallet.transactions.get(tx_hash)))
124 menu.addAction(_("Edit description"), lambda: self.editItem(item, self.editable_columns[0]))
125 menu.addAction(_("View on block explorer"), lambda: webbrowser.open(tx_URL))
126 menu.exec_(self.viewport().mapToGlobal(position))
127
[end of gui/qt/history_widget.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gui/qt/history_widget.py b/gui/qt/history_widget.py
--- a/gui/qt/history_widget.py
+++ b/gui/qt/history_widget.py
@@ -92,6 +92,7 @@
if i!=2:
item.setFont(i, QFont(MONOSPACE_FONT))
if value < 0:
+ item.setForeground(3, QBrush(QColor("#BC1E1E")))
item.setForeground(4, QBrush(QColor("#BC1E1E")))
if tx_hash:
item.setData(0, Qt.UserRole, tx_hash)
| {"golden_diff": "diff --git a/gui/qt/history_widget.py b/gui/qt/history_widget.py\n--- a/gui/qt/history_widget.py\n+++ b/gui/qt/history_widget.py\n@@ -92,6 +92,7 @@\n if i!=2:\n item.setFont(i, QFont(MONOSPACE_FONT))\n if value < 0:\n+ item.setForeground(3, QBrush(QColor(\"#BC1E1E\")))\n item.setForeground(4, QBrush(QColor(\"#BC1E1E\")))\n if tx_hash:\n item.setData(0, Qt.UserRole, tx_hash)\n", "issue": "Feature request: Label colour - History Window\nThe transaction amount of outgoing transactions, when viewed in the history window, is coloured red. \n\nIt would be a nice if the label of these transactions could be coloured the same red colour so that they stand out more against the other incoming transactions. \n\nNot a big issue but would be a 'nice to have'.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# Electrum - lightweight Bitcoin client\n# Copyright (C) 2015 Thomas Voegtlin\n#\n# Permission is hereby granted, free of charge, to any person\n# obtaining a copy of this software and associated documentation files\n# (the \"Software\"), to deal in the Software without restriction,\n# including without limitation the rights to use, copy, modify, merge,\n# publish, distribute, sublicense, and/or sell copies of the Software,\n# and to permit persons to whom the Software is furnished to do so,\n# subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\n# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\n# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\nimport webbrowser\n\nfrom util import *\nfrom electrum.i18n import _\nfrom electrum.util import block_explorer_URL, format_satoshis, format_time\nfrom electrum.plugins import run_hook\n\n\nclass HistoryWidget(MyTreeWidget):\n\n def __init__(self, parent=None):\n MyTreeWidget.__init__(self, parent, self.create_menu, [], 3)\n self.refresh_headers()\n self.setColumnHidden(1, True)\n self.config = self.parent.config\n\n def refresh_headers(self):\n headers = ['', '', _('Date'), _('Description') , _('Amount'),\n _('Balance')]\n run_hook('history_tab_headers', headers)\n self.update_headers(headers)\n\n def get_icon(self, conf, timestamp):\n time_str = _(\"unknown\")\n if conf > 0:\n time_str = format_time(timestamp)\n if conf == -1:\n time_str = _('Not Verified')\n icon = QIcon(\":icons/unconfirmed.png\")\n elif conf == 0:\n time_str = _('Unconfirmed')\n icon = QIcon(\":icons/unconfirmed.png\")\n elif conf < 6:\n icon = QIcon(\":icons/clock%d.png\"%conf)\n else:\n icon = QIcon(\":icons/confirmed.png\")\n return icon, time_str\n\n def get_domain(self):\n '''Replaced in address_dialog.py'''\n return self.wallet.get_account_addresses(self.parent.current_account)\n\n def on_update(self):\n self.wallet = self.parent.wallet\n h = self.wallet.get_history(self.get_domain())\n\n item = self.currentItem()\n current_tx = item.data(0, Qt.UserRole).toString() if item else None\n self.clear()\n run_hook('history_tab_update_begin')\n for tx in h:\n tx_hash, conf, value, timestamp, balance = tx\n if conf is None and timestamp is None:\n 
continue # skip history in offline mode\n icon, time_str = self.get_icon(conf, timestamp)\n v_str = self.parent.format_amount(value, True, whitespaces=True)\n balance_str = self.parent.format_amount(balance, whitespaces=True)\n label = self.wallet.get_label(tx_hash)\n entry = ['', tx_hash, time_str, label, v_str, balance_str]\n run_hook('history_tab_update', tx, entry)\n item = QTreeWidgetItem(entry)\n item.setIcon(0, icon)\n for i in range(len(entry)):\n if i>3:\n item.setTextAlignment(i, Qt.AlignRight)\n if i!=2:\n item.setFont(i, QFont(MONOSPACE_FONT))\n if value < 0:\n item.setForeground(4, QBrush(QColor(\"#BC1E1E\")))\n if tx_hash:\n item.setData(0, Qt.UserRole, tx_hash)\n self.insertTopLevelItem(0, item)\n if current_tx == tx_hash:\n self.setCurrentItem(item)\n\n def update_item(self, tx_hash, conf, timestamp):\n icon, time_str = self.get_icon(conf, timestamp)\n items = self.findItems(tx_hash, Qt.UserRole|Qt.MatchContains|Qt.MatchRecursive, column=1)\n if items:\n item = items[0]\n item.setIcon(0, icon)\n item.setText(2, time_str)\n\n def create_menu(self, position):\n self.selectedIndexes()\n item = self.currentItem()\n if not item:\n return\n tx_hash = str(item.data(0, Qt.UserRole).toString())\n if not tx_hash:\n return\n tx_URL = block_explorer_URL(self.config, 'tx', tx_hash)\n if not tx_URL:\n return\n menu = QMenu()\n menu.addAction(_(\"Copy ID to Clipboard\"), lambda: self.parent.app.clipboard().setText(tx_hash))\n menu.addAction(_(\"Details\"), lambda: self.parent.show_transaction(self.wallet.transactions.get(tx_hash)))\n menu.addAction(_(\"Edit description\"), lambda: self.editItem(item, self.editable_columns[0]))\n menu.addAction(_(\"View on block explorer\"), lambda: webbrowser.open(tx_URL))\n menu.exec_(self.viewport().mapToGlobal(position))\n", "path": "gui/qt/history_widget.py"}]} | 2,003 | 122 |
gh_patches_debug_41255 | rasdani/github-patches | git_diff | pantsbuild__pants-18035 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`No such file or directory: 'local_dists.pex/PEX-INFO'` when running a `python_source`
**Describe the bug**
After updating one of our CI checks to `./pants run` a `python_source` directly (vs. the previous code which ran the `pex_binary`), some of our jobs started failing with:
```
Traceback (most recent call last):
File "/opt/python/3.8.14/lib/python3.8/runpy.py", line 194, in _run_module_as_main
return _run_code(code, main_globals, None,
File "/opt/python/3.8.14/lib/python3.8/runpy.py", line 87, in _run_code
exec(code, run_globals)
File "/home/runner/.pants/execution/pants-sandbox-g6NiAf/./src.pex/__main__.py", line 89, in <module>
__venv_dir__ = __maybe_run_venv__(
File "/home/runner/.pants/execution/pants-sandbox-g6NiAf/./src.pex/__main__.py", line 37, in __maybe_run_venv__
venv_dir = venv_dir(
File "/home/runner/.pants/execution/pants-sandbox-g6NiAf/src.pex/.bootstrap/pex/variables.py", line 738, in venv_dir
File "/home/runner/.pants/execution/pants-sandbox-g6NiAf/src.pex/.bootstrap/pex/variables.py", line 736, in add_pex_path_items
File "/home/runner/.pants/execution/pants-sandbox-g6NiAf/src.pex/.bootstrap/pex/pex_info.py", line 82, in from_pex
FileNotFoundError: [Errno 2] No such file or directory: 'local_dists.pex/PEX-INFO'
```
The `python_source` has `run_goal_use_sandbox=False`.
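For reference, a minimal sketch of the target layout being described — the target names, sources and entry point are placeholders rather than values from the real repository; only `run_goal_use_sandbox=False` comes from this report:

```python
# Hypothetical BUILD file illustrating the setup above.
python_sources(
    name="lib",
    run_goal_use_sandbox=False,
)

pex_binary(
    name="app",                 # previously `./pants run` targeted this pex_binary
    entry_point="main.py",      # placeholder entry point
)
```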
**Pants version**
v2.15.0rc1
**OS**
Linux
</issue>
<code>
[start of src/python/pants/backend/python/goals/run_helper.py]
1 # Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3 from __future__ import annotations
4
5 import dataclasses
6 import os
7 import textwrap
8 from typing import Iterable, Optional
9
10 from pants.backend.python.subsystems.debugpy import DebugPy
11 from pants.backend.python.target_types import (
12 ConsoleScript,
13 PexEntryPointField,
14 ResolvedPexEntryPoint,
15 ResolvePexEntryPointRequest,
16 )
17 from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
18 from pants.backend.python.util_rules.local_dists import LocalDistsPex, LocalDistsPexRequest
19 from pants.backend.python.util_rules.pex import Pex, PexRequest, VenvPex, VenvPexRequest
20 from pants.backend.python.util_rules.pex_environment import PexEnvironment
21 from pants.backend.python.util_rules.pex_from_targets import (
22 InterpreterConstraintsRequest,
23 PexFromTargetsRequest,
24 )
25 from pants.backend.python.util_rules.python_sources import (
26 PythonSourceFiles,
27 PythonSourceFilesRequest,
28 )
29 from pants.core.goals.run import RunDebugAdapterRequest, RunRequest
30 from pants.core.subsystems.debug_adapter import DebugAdapterSubsystem
31 from pants.engine.addresses import Address
32 from pants.engine.fs import CreateDigest, Digest, FileContent, MergeDigests
33 from pants.engine.rules import Get, MultiGet, rule_helper
34 from pants.engine.target import TransitiveTargets, TransitiveTargetsRequest
35
36
37 def _in_chroot(relpath: str) -> str:
38 return os.path.join("{chroot}", relpath)
39
40
41 @rule_helper
42 async def _create_python_source_run_request(
43 address: Address,
44 *,
45 entry_point_field: PexEntryPointField,
46 pex_env: PexEnvironment,
47 run_in_sandbox: bool,
48 pex_path: Iterable[Pex] = (),
49 console_script: Optional[ConsoleScript] = None,
50 ) -> RunRequest:
51 addresses = [address]
52 interpreter_constraints, entry_point, transitive_targets = await MultiGet(
53 Get(InterpreterConstraints, InterpreterConstraintsRequest(addresses)),
54 Get(
55 ResolvedPexEntryPoint,
56 ResolvePexEntryPointRequest(entry_point_field),
57 ),
58 Get(TransitiveTargets, TransitiveTargetsRequest(addresses)),
59 )
60
61 pex_filename = (
62 address.generated_name.replace(".", "_") if address.generated_name else address.target_name
63 )
64
65 pex_request, sources = await MultiGet(
66 Get(
67 PexRequest,
68 PexFromTargetsRequest(
69 addresses,
70 output_filename=f"{pex_filename}.pex",
71 internal_only=True,
72 include_source_files=False,
73 # `PEX_EXTRA_SYS_PATH` should contain this entry_point's module.
74 main=console_script or entry_point.val,
75 additional_args=(
76 # N.B.: Since we cobble together the runtime environment via PEX_EXTRA_SYS_PATH
77 # below, it's important for any app that re-executes itself that these environment
78 # variables are not stripped.
79 "--no-strip-pex-env",
80 ),
81 ),
82 ),
83 Get(
84 PythonSourceFiles,
85 PythonSourceFilesRequest(transitive_targets.closure, include_files=True),
86 ),
87 )
88
89 local_dists = await Get(
90 LocalDistsPex,
91 LocalDistsPexRequest(
92 addresses,
93 internal_only=True,
94 interpreter_constraints=interpreter_constraints,
95 sources=sources,
96 ),
97 )
98 pex_request = dataclasses.replace(
99 pex_request, pex_path=(*pex_request.pex_path, local_dists.pex, *pex_path)
100 )
101
102 if run_in_sandbox:
103 # Note that a RunRequest always expects to run directly in the sandbox/workspace
104 # root, hence working_directory=None.
105 complete_pex_environment = pex_env.in_sandbox(working_directory=None)
106 else:
107 complete_pex_environment = pex_env.in_workspace()
108 venv_pex = await Get(VenvPex, VenvPexRequest(pex_request, complete_pex_environment))
109 input_digests = [
110 venv_pex.digest,
111 # Note regarding not-in-sandbox mode: You might think that the sources don't need to be copied
112 # into the chroot when using inline sources. But they do, because some of them might be
113 # codegenned, and those won't exist in the inline source tree. Rather than incurring the
114 # complexity of figuring out here which sources were codegenned, we copy everything.
115 # The inline source roots precede the chrooted ones in PEX_EXTRA_SYS_PATH, so the inline
116 # sources will take precedence and their copies in the chroot will be ignored.
117 local_dists.remaining_sources.source_files.snapshot.digest,
118 ]
119 merged_digest = await Get(Digest, MergeDigests(input_digests))
120
121 chrooted_source_roots = [_in_chroot(sr) for sr in sources.source_roots]
122 # The order here is important: we want the in-repo sources to take precedence over their
123 # copies in the sandbox (see above for why those copies exist even in non-sandboxed mode).
124 source_roots = [
125 *([] if run_in_sandbox else sources.source_roots),
126 *chrooted_source_roots,
127 ]
128 extra_env = {
129 **complete_pex_environment.environment_dict(python_configured=venv_pex.python is not None),
130 "PEX_EXTRA_SYS_PATH": os.pathsep.join(source_roots),
131 }
132
133 return RunRequest(
134 digest=merged_digest,
135 args=[_in_chroot(venv_pex.pex.argv0)],
136 extra_env=extra_env,
137 append_only_caches=complete_pex_environment.append_only_caches,
138 )
139
140
141 @rule_helper
142 async def _create_python_source_run_dap_request(
143 regular_run_request: RunRequest,
144 *,
145 debugpy: DebugPy,
146 debug_adapter: DebugAdapterSubsystem,
147 ) -> RunDebugAdapterRequest:
148 launcher_digest = await Get(
149 Digest,
150 CreateDigest(
151 [
152 FileContent(
153 "__debugpy_launcher.py",
154 textwrap.dedent(
155 """
156 import os
157 CHROOT = os.environ["PANTS_CHROOT"]
158
159 del os.environ["PEX_INTERPRETER"]
160
161 # See https://github.com/pantsbuild/pants/issues/17540
162 # For `run --debug-adapter`, the client might send a `pathMappings`
163 # (this is likely as VS Code likes to configure that by default) with
164 # a `remoteRoot` of ".". For `run`, CWD is set to the build root, so
165 # breakpoints set in-repo will never be hit. We fix this by monkeypatching
166 # pydevd (the library powering debugpy) so that a remoteRoot of "."
167 # means the sandbox root.
168
169 import debugpy._vendored.force_pydevd
170 from _pydevd_bundle.pydevd_process_net_command_json import PyDevJsonCommandProcessor
171 orig_resolve_remote_root = PyDevJsonCommandProcessor._resolve_remote_root
172
173 def patched_resolve_remote_root(self, local_root, remote_root):
174 if remote_root == ".":
175 remote_root = CHROOT
176 return orig_resolve_remote_root(self, local_root, remote_root)
177
178 PyDevJsonCommandProcessor._resolve_remote_root = patched_resolve_remote_root
179
180 from debugpy.server import cli
181 cli.main()
182 """
183 ).encode("utf-8"),
184 ),
185 ]
186 ),
187 )
188
189 merged_digest = await Get(
190 Digest,
191 MergeDigests(
192 [
193 regular_run_request.digest,
194 launcher_digest,
195 ]
196 ),
197 )
198 extra_env = dict(regular_run_request.extra_env)
199 extra_env["PEX_INTERPRETER"] = "1"
200 extra_env["PANTS_CHROOT"] = _in_chroot("").rstrip("/")
201 args = [
202 regular_run_request.args[0], # python executable
203 _in_chroot("__debugpy_launcher.py"),
204 *debugpy.get_args(debug_adapter),
205 ]
206
207 return RunDebugAdapterRequest(
208 digest=merged_digest,
209 args=args,
210 extra_env=extra_env,
211 append_only_caches=regular_run_request.append_only_caches,
212 )
213
[end of src/python/pants/backend/python/goals/run_helper.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/python/pants/backend/python/goals/run_helper.py b/src/python/pants/backend/python/goals/run_helper.py
--- a/src/python/pants/backend/python/goals/run_helper.py
+++ b/src/python/pants/backend/python/goals/run_helper.py
@@ -14,14 +14,9 @@
ResolvedPexEntryPoint,
ResolvePexEntryPointRequest,
)
-from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
-from pants.backend.python.util_rules.local_dists import LocalDistsPex, LocalDistsPexRequest
from pants.backend.python.util_rules.pex import Pex, PexRequest, VenvPex, VenvPexRequest
from pants.backend.python.util_rules.pex_environment import PexEnvironment
-from pants.backend.python.util_rules.pex_from_targets import (
- InterpreterConstraintsRequest,
- PexFromTargetsRequest,
-)
+from pants.backend.python.util_rules.pex_from_targets import PexFromTargetsRequest
from pants.backend.python.util_rules.python_sources import (
PythonSourceFiles,
PythonSourceFilesRequest,
@@ -49,8 +44,7 @@
console_script: Optional[ConsoleScript] = None,
) -> RunRequest:
addresses = [address]
- interpreter_constraints, entry_point, transitive_targets = await MultiGet(
- Get(InterpreterConstraints, InterpreterConstraintsRequest(addresses)),
+ entry_point, transitive_targets = await MultiGet(
Get(
ResolvedPexEntryPoint,
ResolvePexEntryPointRequest(entry_point_field),
@@ -70,6 +64,7 @@
output_filename=f"{pex_filename}.pex",
internal_only=True,
include_source_files=False,
+ include_local_dists=True,
# `PEX_EXTRA_SYS_PATH` should contain this entry_point's module.
main=console_script or entry_point.val,
additional_args=(
@@ -86,18 +81,7 @@
),
)
- local_dists = await Get(
- LocalDistsPex,
- LocalDistsPexRequest(
- addresses,
- internal_only=True,
- interpreter_constraints=interpreter_constraints,
- sources=sources,
- ),
- )
- pex_request = dataclasses.replace(
- pex_request, pex_path=(*pex_request.pex_path, local_dists.pex, *pex_path)
- )
+ pex_request = dataclasses.replace(pex_request, pex_path=(*pex_request.pex_path, *pex_path))
if run_in_sandbox:
# Note that a RunRequest always expects to run directly in the sandbox/workspace
@@ -114,7 +98,7 @@
# complexity of figuring out here which sources were codegenned, we copy everything.
# The inline source roots precede the chrooted ones in PEX_EXTRA_SYS_PATH, so the inline
# sources will take precedence and their copies in the chroot will be ignored.
- local_dists.remaining_sources.source_files.snapshot.digest,
+ sources.source_files.snapshot.digest,
]
merged_digest = await Get(Digest, MergeDigests(input_digests))
| {"golden_diff": "diff --git a/src/python/pants/backend/python/goals/run_helper.py b/src/python/pants/backend/python/goals/run_helper.py\n--- a/src/python/pants/backend/python/goals/run_helper.py\n+++ b/src/python/pants/backend/python/goals/run_helper.py\n@@ -14,14 +14,9 @@\n ResolvedPexEntryPoint,\n ResolvePexEntryPointRequest,\n )\n-from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints\n-from pants.backend.python.util_rules.local_dists import LocalDistsPex, LocalDistsPexRequest\n from pants.backend.python.util_rules.pex import Pex, PexRequest, VenvPex, VenvPexRequest\n from pants.backend.python.util_rules.pex_environment import PexEnvironment\n-from pants.backend.python.util_rules.pex_from_targets import (\n- InterpreterConstraintsRequest,\n- PexFromTargetsRequest,\n-)\n+from pants.backend.python.util_rules.pex_from_targets import PexFromTargetsRequest\n from pants.backend.python.util_rules.python_sources import (\n PythonSourceFiles,\n PythonSourceFilesRequest,\n@@ -49,8 +44,7 @@\n console_script: Optional[ConsoleScript] = None,\n ) -> RunRequest:\n addresses = [address]\n- interpreter_constraints, entry_point, transitive_targets = await MultiGet(\n- Get(InterpreterConstraints, InterpreterConstraintsRequest(addresses)),\n+ entry_point, transitive_targets = await MultiGet(\n Get(\n ResolvedPexEntryPoint,\n ResolvePexEntryPointRequest(entry_point_field),\n@@ -70,6 +64,7 @@\n output_filename=f\"{pex_filename}.pex\",\n internal_only=True,\n include_source_files=False,\n+ include_local_dists=True,\n # `PEX_EXTRA_SYS_PATH` should contain this entry_point's module.\n main=console_script or entry_point.val,\n additional_args=(\n@@ -86,18 +81,7 @@\n ),\n )\n \n- local_dists = await Get(\n- LocalDistsPex,\n- LocalDistsPexRequest(\n- addresses,\n- internal_only=True,\n- interpreter_constraints=interpreter_constraints,\n- sources=sources,\n- ),\n- )\n- pex_request = dataclasses.replace(\n- pex_request, pex_path=(*pex_request.pex_path, local_dists.pex, *pex_path)\n- )\n+ pex_request = dataclasses.replace(pex_request, pex_path=(*pex_request.pex_path, *pex_path))\n \n if run_in_sandbox:\n # Note that a RunRequest always expects to run directly in the sandbox/workspace\n@@ -114,7 +98,7 @@\n # complexity of figuring out here which sources were codegenned, we copy everything.\n # The inline source roots precede the chrooted ones in PEX_EXTRA_SYS_PATH, so the inline\n # sources will take precedence and their copies in the chroot will be ignored.\n- local_dists.remaining_sources.source_files.snapshot.digest,\n+ sources.source_files.snapshot.digest,\n ]\n merged_digest = await Get(Digest, MergeDigests(input_digests))\n", "issue": "`No such file or directory: 'local_dists.pex/PEX-INFO'` when running a `python_source`\n**Describe the bug**\r\n\r\nAfter updating one of our CI checks to `./pants run` a `python_source` directly (vs. 
the previous code which ran the `pex_binary`), some of our jobs started failing with:\r\n```\r\nTraceback (most recent call last):\r\n File \"/opt/python/3.8.14/lib/python3.8/runpy.py\", line 194, in _run_module_as_main\r\n return _run_code(code, main_globals, None,\r\n File \"/opt/python/3.8.14/lib/python3.8/runpy.py\", line 87, in _run_code\r\n exec(code, run_globals)\r\n File \"/home/runner/.pants/execution/pants-sandbox-g6NiAf/./src.pex/__main__.py\", line 89, in <module>\r\n __venv_dir__ = __maybe_run_venv__(\r\n File \"/home/runner/.pants/execution/pants-sandbox-g6NiAf/./src.pex/__main__.py\", line 37, in __maybe_run_venv__\r\n venv_dir = venv_dir(\r\n File \"/home/runner/.pants/execution/pants-sandbox-g6NiAf/src.pex/.bootstrap/pex/variables.py\", line 738, in venv_dir\r\n File \"/home/runner/.pants/execution/pants-sandbox-g6NiAf/src.pex/.bootstrap/pex/variables.py\", line 736, in add_pex_path_items\r\n File \"/home/runner/.pants/execution/pants-sandbox-g6NiAf/src.pex/.bootstrap/pex/pex_info.py\", line 82, in from_pex\r\nFileNotFoundError: [Errno 2] No such file or directory: 'local_dists.pex/PEX-INFO'\r\n```\r\n\r\nThe `python_source` has `run_goal_use_sandbox=False`.\r\n\r\n**Pants version**\r\n\r\nv2.15.0rc1\r\n\r\n**OS**\r\n\r\nLinux\r\n\n", "before_files": [{"content": "# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\nfrom __future__ import annotations\n\nimport dataclasses\nimport os\nimport textwrap\nfrom typing import Iterable, Optional\n\nfrom pants.backend.python.subsystems.debugpy import DebugPy\nfrom pants.backend.python.target_types import (\n ConsoleScript,\n PexEntryPointField,\n ResolvedPexEntryPoint,\n ResolvePexEntryPointRequest,\n)\nfrom pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints\nfrom pants.backend.python.util_rules.local_dists import LocalDistsPex, LocalDistsPexRequest\nfrom pants.backend.python.util_rules.pex import Pex, PexRequest, VenvPex, VenvPexRequest\nfrom pants.backend.python.util_rules.pex_environment import PexEnvironment\nfrom pants.backend.python.util_rules.pex_from_targets import (\n InterpreterConstraintsRequest,\n PexFromTargetsRequest,\n)\nfrom pants.backend.python.util_rules.python_sources import (\n PythonSourceFiles,\n PythonSourceFilesRequest,\n)\nfrom pants.core.goals.run import RunDebugAdapterRequest, RunRequest\nfrom pants.core.subsystems.debug_adapter import DebugAdapterSubsystem\nfrom pants.engine.addresses import Address\nfrom pants.engine.fs import CreateDigest, Digest, FileContent, MergeDigests\nfrom pants.engine.rules import Get, MultiGet, rule_helper\nfrom pants.engine.target import TransitiveTargets, TransitiveTargetsRequest\n\n\ndef _in_chroot(relpath: str) -> str:\n return os.path.join(\"{chroot}\", relpath)\n\n\n@rule_helper\nasync def _create_python_source_run_request(\n address: Address,\n *,\n entry_point_field: PexEntryPointField,\n pex_env: PexEnvironment,\n run_in_sandbox: bool,\n pex_path: Iterable[Pex] = (),\n console_script: Optional[ConsoleScript] = None,\n) -> RunRequest:\n addresses = [address]\n interpreter_constraints, entry_point, transitive_targets = await MultiGet(\n Get(InterpreterConstraints, InterpreterConstraintsRequest(addresses)),\n Get(\n ResolvedPexEntryPoint,\n ResolvePexEntryPointRequest(entry_point_field),\n ),\n Get(TransitiveTargets, TransitiveTargetsRequest(addresses)),\n )\n\n pex_filename = (\n address.generated_name.replace(\".\", \"_\") if address.generated_name else 
address.target_name\n )\n\n pex_request, sources = await MultiGet(\n Get(\n PexRequest,\n PexFromTargetsRequest(\n addresses,\n output_filename=f\"{pex_filename}.pex\",\n internal_only=True,\n include_source_files=False,\n # `PEX_EXTRA_SYS_PATH` should contain this entry_point's module.\n main=console_script or entry_point.val,\n additional_args=(\n # N.B.: Since we cobble together the runtime environment via PEX_EXTRA_SYS_PATH\n # below, it's important for any app that re-executes itself that these environment\n # variables are not stripped.\n \"--no-strip-pex-env\",\n ),\n ),\n ),\n Get(\n PythonSourceFiles,\n PythonSourceFilesRequest(transitive_targets.closure, include_files=True),\n ),\n )\n\n local_dists = await Get(\n LocalDistsPex,\n LocalDistsPexRequest(\n addresses,\n internal_only=True,\n interpreter_constraints=interpreter_constraints,\n sources=sources,\n ),\n )\n pex_request = dataclasses.replace(\n pex_request, pex_path=(*pex_request.pex_path, local_dists.pex, *pex_path)\n )\n\n if run_in_sandbox:\n # Note that a RunRequest always expects to run directly in the sandbox/workspace\n # root, hence working_directory=None.\n complete_pex_environment = pex_env.in_sandbox(working_directory=None)\n else:\n complete_pex_environment = pex_env.in_workspace()\n venv_pex = await Get(VenvPex, VenvPexRequest(pex_request, complete_pex_environment))\n input_digests = [\n venv_pex.digest,\n # Note regarding not-in-sandbox mode: You might think that the sources don't need to be copied\n # into the chroot when using inline sources. But they do, because some of them might be\n # codegenned, and those won't exist in the inline source tree. Rather than incurring the\n # complexity of figuring out here which sources were codegenned, we copy everything.\n # The inline source roots precede the chrooted ones in PEX_EXTRA_SYS_PATH, so the inline\n # sources will take precedence and their copies in the chroot will be ignored.\n local_dists.remaining_sources.source_files.snapshot.digest,\n ]\n merged_digest = await Get(Digest, MergeDigests(input_digests))\n\n chrooted_source_roots = [_in_chroot(sr) for sr in sources.source_roots]\n # The order here is important: we want the in-repo sources to take precedence over their\n # copies in the sandbox (see above for why those copies exist even in non-sandboxed mode).\n source_roots = [\n *([] if run_in_sandbox else sources.source_roots),\n *chrooted_source_roots,\n ]\n extra_env = {\n **complete_pex_environment.environment_dict(python_configured=venv_pex.python is not None),\n \"PEX_EXTRA_SYS_PATH\": os.pathsep.join(source_roots),\n }\n\n return RunRequest(\n digest=merged_digest,\n args=[_in_chroot(venv_pex.pex.argv0)],\n extra_env=extra_env,\n append_only_caches=complete_pex_environment.append_only_caches,\n )\n\n\n@rule_helper\nasync def _create_python_source_run_dap_request(\n regular_run_request: RunRequest,\n *,\n debugpy: DebugPy,\n debug_adapter: DebugAdapterSubsystem,\n) -> RunDebugAdapterRequest:\n launcher_digest = await Get(\n Digest,\n CreateDigest(\n [\n FileContent(\n \"__debugpy_launcher.py\",\n textwrap.dedent(\n \"\"\"\n import os\n CHROOT = os.environ[\"PANTS_CHROOT\"]\n\n del os.environ[\"PEX_INTERPRETER\"]\n\n # See https://github.com/pantsbuild/pants/issues/17540\n # For `run --debug-adapter`, the client might send a `pathMappings`\n # (this is likely as VS Code likes to configure that by default) with\n # a `remoteRoot` of \".\". For `run`, CWD is set to the build root, so\n # breakpoints set in-repo will never be hit. 
We fix this by monkeypatching\n # pydevd (the library powering debugpy) so that a remoteRoot of \".\"\n # means the sandbox root.\n\n import debugpy._vendored.force_pydevd\n from _pydevd_bundle.pydevd_process_net_command_json import PyDevJsonCommandProcessor\n orig_resolve_remote_root = PyDevJsonCommandProcessor._resolve_remote_root\n\n def patched_resolve_remote_root(self, local_root, remote_root):\n if remote_root == \".\":\n remote_root = CHROOT\n return orig_resolve_remote_root(self, local_root, remote_root)\n\n PyDevJsonCommandProcessor._resolve_remote_root = patched_resolve_remote_root\n\n from debugpy.server import cli\n cli.main()\n \"\"\"\n ).encode(\"utf-8\"),\n ),\n ]\n ),\n )\n\n merged_digest = await Get(\n Digest,\n MergeDigests(\n [\n regular_run_request.digest,\n launcher_digest,\n ]\n ),\n )\n extra_env = dict(regular_run_request.extra_env)\n extra_env[\"PEX_INTERPRETER\"] = \"1\"\n extra_env[\"PANTS_CHROOT\"] = _in_chroot(\"\").rstrip(\"/\")\n args = [\n regular_run_request.args[0], # python executable\n _in_chroot(\"__debugpy_launcher.py\"),\n *debugpy.get_args(debug_adapter),\n ]\n\n return RunDebugAdapterRequest(\n digest=merged_digest,\n args=args,\n extra_env=extra_env,\n append_only_caches=regular_run_request.append_only_caches,\n )\n", "path": "src/python/pants/backend/python/goals/run_helper.py"}]} | 3,304 | 681 |
gh_patches_debug_627 | rasdani/github-patches | git_diff | bridgecrewio__checkov-3151 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Terraform parsing error for a string with an escaped backslash at the end
**Describe the issue**
Checkov crashes if it encounters an escaped backslash (`"\\"`) at the end of a string.
**Examples**
Minimal example to reproduce the error:
```terraform
variable "slash" {
default = "\\"
}
output "slash" {
value = var.slash
}
```
`terraform validate` sees this configuration as valid, but checkov fails with a parsing error.
This only happens when the last character of the string is the escaped backslash, as the parser assumes the closing quotation mark is escaped. Adding any normal character at the end of the string doesn't trigger this error.
```terraform
variable "slash" {
  default = "\\a"
}
```
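To illustrate the parser assumption described above, here is a deliberately naive quote scanner — a sketch of the failure mode only, not the actual hcl2 implementation:

```python
def naive_has_unclosed_quote(line: str) -> bool:
    """Flawed assumption: any quote preceded by a backslash is an escaped quote."""
    in_string = False
    for i, ch in enumerate(line):
        if ch == '"' and (i == 0 or line[i - 1] != "\\"):
            in_string = not in_string
    return in_string

# `default = "\\"` ends with an escaped backslash right before the closing quote,
# so the naive scanner believes the closing quote itself is escaped.
print(naive_has_unclosed_quote('default = "\\\\"'))   # True  (wrongly flagged as unclosed)
print(naive_has_unclosed_quote('default = "\\\\a"'))  # False (a trailing character hides the bug)
```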
**Exception Trace**
Relevant traceback
```sh
> LOG_LEVEL=DEBUG checkov -d .
[...]
[MainThread ] [DEBUG] failed while parsing file /workdir/main.tf
Traceback (most recent call last):
File "/Users/user/.local/pipx/venvs/checkov/lib/python3.8/site-packages/checkov/terraform/parser.py", line 726, in _load_or_die_quietly
raw_data = hcl2.load(f)
File "/Users/user/.local/pipx/venvs/checkov/lib/python3.8/site-packages/hcl2/api.py", line 12, in load
return loads(file.read())
File "/Users/user/.local/pipx/venvs/checkov/lib/python3.8/site-packages/hcl2/api.py", line 80, in loads
raise ValueError(f"Line has unclosed quote marks: {line}")
ValueError: Line has unclosed quote marks: default = "\\"
[...]
```
**Desktop (please complete the following information):**
- OS: MacOS 12.3.1 (Intel)
- Checkov Version: 2.0.1230
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 import logging
3 import os
4 from importlib import util
5 from os import path
6
7 import setuptools
8 from setuptools import setup
9
10 # read the contents of your README file
11 this_directory = path.abspath(path.dirname(__file__))
12 with open(path.join(this_directory, "README.md"), encoding="utf-8") as f:
13 long_description = f.read()
14
15 logger = logging.getLogger(__name__)
16 spec = util.spec_from_file_location(
17 "checkov.version", os.path.join("checkov", "version.py")
18 )
19 # noinspection PyUnresolvedReferences
20 mod = util.module_from_spec(spec)
21 spec.loader.exec_module(mod) # type: ignore
22 version = mod.version # type: ignore
23
24 setup(
25 extras_require={
26 "dev": [
27 "pytest==5.3.1",
28 "coverage==5.5",
29 "coverage-badge",
30 "GitPython==3.1.7",
31 "bandit",
32 "jsonschema",
33 ]
34 },
35 install_requires=[
36 "bc-python-hcl2==0.3.42",
37 "cloudsplaining>=0.4.1",
38 "deep_merge",
39 "tabulate",
40 "colorama",
41 "termcolor",
42 "junit-xml>=1.9",
43 "dpath>=1.5.0,<2",
44 "pyyaml>=5.4.1",
45 "boto3>=1.17",
46 "GitPython",
47 "jmespath",
48 "tqdm",
49 "update_checker",
50 "semantic_version",
51 "packaging",
52 "networkx",
53 "dockerfile-parse",
54 "docker",
55 "configargparse",
56 "argcomplete",
57 "detect-secrets",
58 "policyuniverse",
59 "typing-extensions>=4.1.0",
60 "cachetools",
61 "cyclonedx-python-lib>=2.4.0",
62 "click>=8.0.0",
63 "aiohttp",
64 "aiodns",
65 "aiomultiprocess",
66 "jsonpath_ng",
67 "jsonschema~=3.0",
68 "prettytable>=3.0.0",
69 "pycep-parser==0.3.7",
70 "charset-normalizer",
71 ],
72 license="Apache License 2.0",
73 name="checkov",
74 version=version,
75 python_requires=">=3.7",
76 description="Infrastructure as code static analysis",
77 author="bridgecrew",
78 author_email="[email protected]",
79 url="https://github.com/bridgecrewio/checkov",
80 packages=setuptools.find_packages(exclude=["tests*", "integration_tests*"]),
81 include_package_data=True,
82 package_dir={
83 "checkov.bicep.checks.graph_checks": "checkov/bicep/checks/graph_checks",
84 "checkov.terraform.checks.graph_checks": "checkov/terraform/checks/graph_checks",
85 },
86 package_data={
87 "checkov": ["py.typed"],
88 "checkov.bicep.checks.graph_checks": ["*.yaml"],
89 "checkov.common.util.templates": ["*.jinja2"],
90 "checkov.terraform.checks.graph_checks": [
91 "aws/*.yaml",
92 "gcp/*.yaml",
93 "azure/*.yaml",
94 ],
95 },
96 scripts=["bin/checkov", "bin/checkov.cmd"],
97 long_description=long_description,
98 long_description_content_type="text/markdown",
99 classifiers=[
100 "Environment :: Console",
101 "Intended Audience :: Developers",
102 "Intended Audience :: System Administrators",
103 "License :: OSI Approved :: Apache Software License",
104 "Programming Language :: Python :: 3 :: Only",
105 "Programming Language :: Python :: 3.7",
106 "Programming Language :: Python :: 3.8",
107 "Programming Language :: Python :: 3.9",
108 "Programming Language :: Python :: 3.10",
109 "Topic :: Security",
110 "Topic :: Software Development :: Build Tools",
111 "Typing :: Typed",
112 ],
113 )
114
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -33,7 +33,7 @@
]
},
install_requires=[
- "bc-python-hcl2==0.3.42",
+ "bc-python-hcl2==0.3.44",
"cloudsplaining>=0.4.1",
"deep_merge",
"tabulate",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -33,7 +33,7 @@\n ]\n },\n install_requires=[\n- \"bc-python-hcl2==0.3.42\",\n+ \"bc-python-hcl2==0.3.44\",\n \"cloudsplaining>=0.4.1\",\n \"deep_merge\",\n \"tabulate\",\n", "issue": "Terraform parsing error string with escaped backslash at the end\n**Describe the issue**\r\nCheckov crashes if it encounters an escaped backslash (`\"\\\\\"`) at the end of a string.\r\n\r\n**Examples**\r\nMinimal example to reproduce the error:\r\n```terraform\r\nvariable \"slash\" {\r\n default = \"\\\\\"\r\n}\r\n\r\noutput \"slash\" {\r\n value = var.slash\r\n}\r\n```\r\n`terraform validate` sees this configuration as valid, but checkov fails with a parsing error.\r\n\r\nThis only happens when the last character of the string is the escaped backslash, as the parser assumes the closing quotation mark is escaped. Adding any normal character at the end of the string doesn't trigger this error.\r\n```terraform\r\nvariable \"slash\" {\r\n default = \"\\\\\"\r\n}\r\n```\r\n\r\n**Exception Trace**\r\nRelevant traceback\r\n```sh\r\n> LOG_LEVEL=DEBUG checkov -d .\r\n[...]\r\n[MainThread ] [DEBUG] failed while parsing file /workdir/main.tf\r\nTraceback (most recent call last):\r\n File \"/Users/user/.local/pipx/venvs/checkov/lib/python3.8/site-packages/checkov/terraform/parser.py\", line 726, in _load_or_die_quietly\r\n raw_data = hcl2.load(f)\r\n File \"/Users/user/.local/pipx/venvs/checkov/lib/python3.8/site-packages/hcl2/api.py\", line 12, in load\r\n return loads(file.read())\r\n File \"/Users/user/.local/pipx/venvs/checkov/lib/python3.8/site-packages/hcl2/api.py\", line 80, in loads\r\n raise ValueError(f\"Line has unclosed quote marks: {line}\")\r\nValueError: Line has unclosed quote marks: default = \"\\\\\"\r\n[...]\r\n```\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: MacOS 12.3.1 (Intel)\r\n - Checkov Version: 2.0.1230\n", "before_files": [{"content": "#!/usr/bin/env python\nimport logging\nimport os\nfrom importlib import util\nfrom os import path\n\nimport setuptools\nfrom setuptools import setup\n\n# read the contents of your README file\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nlogger = logging.getLogger(__name__)\nspec = util.spec_from_file_location(\n \"checkov.version\", os.path.join(\"checkov\", \"version.py\")\n)\n# noinspection PyUnresolvedReferences\nmod = util.module_from_spec(spec)\nspec.loader.exec_module(mod) # type: ignore\nversion = mod.version # type: ignore\n\nsetup(\n extras_require={\n \"dev\": [\n \"pytest==5.3.1\",\n \"coverage==5.5\",\n \"coverage-badge\",\n \"GitPython==3.1.7\",\n \"bandit\",\n \"jsonschema\",\n ]\n },\n install_requires=[\n \"bc-python-hcl2==0.3.42\",\n \"cloudsplaining>=0.4.1\",\n \"deep_merge\",\n \"tabulate\",\n \"colorama\",\n \"termcolor\",\n \"junit-xml>=1.9\",\n \"dpath>=1.5.0,<2\",\n \"pyyaml>=5.4.1\",\n \"boto3>=1.17\",\n \"GitPython\",\n \"jmespath\",\n \"tqdm\",\n \"update_checker\",\n \"semantic_version\",\n \"packaging\",\n \"networkx\",\n \"dockerfile-parse\",\n \"docker\",\n \"configargparse\",\n \"argcomplete\",\n \"detect-secrets\",\n \"policyuniverse\",\n \"typing-extensions>=4.1.0\",\n \"cachetools\",\n \"cyclonedx-python-lib>=2.4.0\",\n \"click>=8.0.0\",\n \"aiohttp\",\n \"aiodns\",\n \"aiomultiprocess\",\n \"jsonpath_ng\",\n \"jsonschema~=3.0\",\n \"prettytable>=3.0.0\",\n \"pycep-parser==0.3.7\",\n 
\"charset-normalizer\",\n ],\n license=\"Apache License 2.0\",\n name=\"checkov\",\n version=version,\n python_requires=\">=3.7\",\n description=\"Infrastructure as code static analysis\",\n author=\"bridgecrew\",\n author_email=\"[email protected]\",\n url=\"https://github.com/bridgecrewio/checkov\",\n packages=setuptools.find_packages(exclude=[\"tests*\", \"integration_tests*\"]),\n include_package_data=True,\n package_dir={\n \"checkov.bicep.checks.graph_checks\": \"checkov/bicep/checks/graph_checks\",\n \"checkov.terraform.checks.graph_checks\": \"checkov/terraform/checks/graph_checks\",\n },\n package_data={\n \"checkov\": [\"py.typed\"],\n \"checkov.bicep.checks.graph_checks\": [\"*.yaml\"],\n \"checkov.common.util.templates\": [\"*.jinja2\"],\n \"checkov.terraform.checks.graph_checks\": [\n \"aws/*.yaml\",\n \"gcp/*.yaml\",\n \"azure/*.yaml\",\n ],\n },\n scripts=[\"bin/checkov\", \"bin/checkov.cmd\"],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n classifiers=[\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Security\",\n \"Topic :: Software Development :: Build Tools\",\n \"Typing :: Typed\",\n ],\n)\n", "path": "setup.py"}]} | 2,038 | 94 |
gh_patches_debug_25963 | rasdani/github-patches | git_diff | pymedusa__Medusa-3131 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Login error
```
Error
unhashable type: 'list'
Traceback
Traceback (most recent call last):
File "/home/pi/Medusa/ext/tornado/web.py", line 1509, in _execute result = method(*self.path_args, **self.path_kwargs)
File "/home/pi/Medusa/medusa/server/web/core/authentication.py", line 73, in post notifiers.notify_login(self.request.remote_ip)
File "/home/pi/Medusa/medusa/notifiers/__init__.py", line 127, in notify_login n.notify_login(ipaddress)
File "/home/pi/Medusa/medusa/notifiers/nma.py", line 44, in notify_login self._sendNMA(nma_api=None, nma_priority=None, event=title, message=update_text.format(ipaddress))
File "/home/pi/Medusa/medusa/notifiers/nma.py", line 74, in _sendNMA if not response[nma_api][u'code'] == u'200':
TypeError: unhashable type: 'list'
Request Info
body: username=supergonkas&password=&remember_me=1&submit=Login
files: {}
protocol: http
connection:
body_arguments: {'username': ['supergonkas'], 'remember_me': ['1'], 'password': [''], 'submit': ['Login']}
uri: /login/?next=%2Fhistory%2F
query_arguments: {'next': ['/history/']}
_start_time: 1505997382.06
headers: Origin: http://:8081 Save-Data: on Content-Length: 61 Accept-Language: pt-PT,pt;q=0.8,en-US;q=0.6,en;q=0.4,es;q=0.2 Accept-Encoding: gzip, deflate Connection: keep-alive Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8 Upgrade-Insecure-Requests: 1 Dnt: 1 Host::8081 Referer: http://:8081/login/?next=%2Fhistory%2F Cache-Control: max-age=0 User-Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.116 Safari/537.36 Content-Type: application/x-www-form-urlencoded
host: :8081
version: HTTP/1.1
server_connection:
host_name: s
_finish_time: None
query: next=%2Fhistory%2F
arguments: {'username': ['supergonkas'], 'remember_me': ['1'], 'password': [''], 'submit': ['Login'], 'next': ['/history/']}
path: /login/
method: POST
remote_ip: 194.210.190.12
View Log(Errors)
```
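The final frame fails because a dict is being indexed with a list, which is unhashable in Python. A minimal illustration with made-up values standing in for the real NMA response:

```python
# Hypothetical values; only the indexing pattern mirrors the traceback above.
response = {"0123456789abcdef": {u"code": u"200"}}
nma_api = ["0123456789abcdef"]   # a list of API keys rather than a single string

response[nma_api]                # TypeError: unhashable type: 'list'
```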
</issue>
<code>
[start of medusa/notifiers/nma.py]
1 # coding=utf-8
2
3 import logging
4
5 from medusa import app, common
6 from medusa.logger.adapters.style import BraceAdapter
7
8 from pynma import pynma
9
10 log = BraceAdapter(logging.getLogger(__name__))
11 log.logger.addHandler(logging.NullHandler())
12
13
14 class Notifier(object):
15 def test_notify(self, nma_api, nma_priority):
16 return self._sendNMA(nma_api, nma_priority, event='Test', message='Testing NMA settings from Medusa',
17 force=True)
18
19 def notify_snatch(self, ep_name, is_proper):
20 if app.NMA_NOTIFY_ONSNATCH:
21 self._sendNMA(nma_api=None, nma_priority=None, event=common.notifyStrings[(common.NOTIFY_SNATCH, common.NOTIFY_SNATCH_PROPER)[is_proper]],
22 message=ep_name)
23
24 def notify_download(self, ep_name):
25 if app.NMA_NOTIFY_ONDOWNLOAD:
26 self._sendNMA(nma_api=None, nma_priority=None, event=common.notifyStrings[common.NOTIFY_DOWNLOAD],
27 message=ep_name)
28
29 def notify_subtitle_download(self, ep_name, lang):
30 if app.NMA_NOTIFY_ONSUBTITLEDOWNLOAD:
31 self._sendNMA(nma_api=None, nma_priority=None, event=common.notifyStrings[common.NOTIFY_SUBTITLE_DOWNLOAD],
32 message=ep_name + ': ' + lang)
33
34 def notify_git_update(self, new_version='??'):
35 if app.USE_NMA:
36 update_text = common.notifyStrings[common.NOTIFY_GIT_UPDATE_TEXT]
37 title = common.notifyStrings[common.NOTIFY_GIT_UPDATE]
38 self._sendNMA(nma_api=None, nma_priority=None, event=title, message=update_text + new_version)
39
40 def notify_login(self, ipaddress=''):
41 if app.USE_NMA:
42 update_text = common.notifyStrings[common.NOTIFY_LOGIN_TEXT]
43 title = common.notifyStrings[common.NOTIFY_LOGIN]
44 self._sendNMA(nma_api=None, nma_priority=None, event=title, message=update_text.format(ipaddress))
45
46 def _sendNMA(self, nma_api=None, nma_priority=None, event=None, message=None, force=False):
47
48 title = 'Medusa'
49
50 if not app.USE_NMA and not force:
51 return False
52
53 if nma_api is None:
54 nma_api = app.NMA_API
55 else:
56 nma_api = nma_api.split(',')
57
58 if nma_priority is None:
59 nma_priority = app.NMA_PRIORITY
60
61 batch = False
62
63 p = pynma.PyNMA()
64 keys = nma_api
65 p.addkey(keys)
66
67 if len(keys) > 1:
68 batch = True
69
70 log.debug(u'NMA: Sending notice with details: event="{0}, message="{1}", priority={2}, batch={3}',
71 event, message, nma_priority, batch)
72 response = p.push(application=title, event=event, description=message, priority=nma_priority, batch_mode=batch)
73
74 if not response[nma_api][u'code'] == u'200':
75 log.error(u'Could not send notification to NotifyMyAndroid')
76 return False
77 else:
78 log.info(u'NMA: Notification sent to NotifyMyAndroid')
79 return True
80
[end of medusa/notifiers/nma.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/medusa/notifiers/nma.py b/medusa/notifiers/nma.py
--- a/medusa/notifiers/nma.py
+++ b/medusa/notifiers/nma.py
@@ -4,8 +4,8 @@
from medusa import app, common
from medusa.logger.adapters.style import BraceAdapter
-
from pynma import pynma
+from six import text_type
log = BraceAdapter(logging.getLogger(__name__))
log.logger.addHandler(logging.NullHandler())
@@ -52,8 +52,8 @@
if nma_api is None:
nma_api = app.NMA_API
- else:
- nma_api = nma_api.split(',')
+ elif isinstance(nma_api, text_type):
+ nma_api = [nma_api]
if nma_priority is None:
nma_priority = app.NMA_PRIORITY
@@ -71,7 +71,7 @@
event, message, nma_priority, batch)
response = p.push(application=title, event=event, description=message, priority=nma_priority, batch_mode=batch)
- if not response[nma_api][u'code'] == u'200':
+ if not response[','.join(nma_api)][u'code'] == u'200':
log.error(u'Could not send notification to NotifyMyAndroid')
return False
else:
| {"golden_diff": "diff --git a/medusa/notifiers/nma.py b/medusa/notifiers/nma.py\n--- a/medusa/notifiers/nma.py\n+++ b/medusa/notifiers/nma.py\n@@ -4,8 +4,8 @@\n \n from medusa import app, common\n from medusa.logger.adapters.style import BraceAdapter\n-\n from pynma import pynma\n+from six import text_type\n \n log = BraceAdapter(logging.getLogger(__name__))\n log.logger.addHandler(logging.NullHandler())\n@@ -52,8 +52,8 @@\n \n if nma_api is None:\n nma_api = app.NMA_API\n- else:\n- nma_api = nma_api.split(',')\n+ elif isinstance(nma_api, text_type):\n+ nma_api = [nma_api]\n \n if nma_priority is None:\n nma_priority = app.NMA_PRIORITY\n@@ -71,7 +71,7 @@\n event, message, nma_priority, batch)\n response = p.push(application=title, event=event, description=message, priority=nma_priority, batch_mode=batch)\n \n- if not response[nma_api][u'code'] == u'200':\n+ if not response[','.join(nma_api)][u'code'] == u'200':\n log.error(u'Could not send notification to NotifyMyAndroid')\n return False\n else:\n", "issue": "Login error\n```\r\nError\r\nunhashable type: 'list'\r\nTraceback\r\nTraceback (most recent call last): \r\nFile \"/home/pi/Medusa/ext/tornado/web.py\", line 1509, in _execute result = method(*self.path_args, **self.path_kwargs) \r\nFile \"/home/pi/Medusa/medusa/server/web/core/authentication.py\", line 73, in post notifiers.notify_login(self.request.remote_ip) \r\nFile \"/home/pi/Medusa/medusa/notifiers/__init__.py\", line 127, in notify_login n.notify_login(ipaddress)\r\nFile \"/home/pi/Medusa/medusa/notifiers/nma.py\", line 44, in notify_login self._sendNMA(nma_api=None, nma_priority=None, event=title, message=update_text.format(ipaddress)) \r\nFile \"/home/pi/Medusa/medusa/notifiers/nma.py\", line 74, in _sendNMA if not response[nma_api][u'code'] == u'200': \r\nTypeError: unhashable type: 'list' \r\nRequest Info\r\nbody: username=supergonkas&password=&remember_me=1&submit=Login\r\nfiles: {}\r\nprotocol: http\r\nconnection: \r\nbody_arguments: {'username': ['supergonkas'], 'remember_me': ['1'], 'password': [''], 'submit': ['Login']}\r\nuri: /login/?next=%2Fhistory%2F\r\nquery_arguments: {'next': ['/history/']}\r\n_start_time: 1505997382.06\r\nheaders: Origin: http://:8081 Save-Data: on Content-Length: 61 Accept-Language: pt-PT,pt;q=0.8,en-US;q=0.6,en;q=0.4,es;q=0.2 Accept-Encoding: gzip, deflate Connection: keep-alive Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8 Upgrade-Insecure-Requests: 1 Dnt: 1 Host::8081 Referer: http://:8081/login/?next=%2Fhistory%2F Cache-Control: max-age=0 User-Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.116 Safari/537.36 Content-Type: application/x-www-form-urlencoded \r\nhost: :8081\r\nversion: HTTP/1.1\r\nserver_connection: \r\nhost_name: s\r\n_finish_time: None\r\nquery: next=%2Fhistory%2F\r\narguments: {'username': ['supergonkas'], 'remember_me': ['1'], 'password': [''], 'submit': ['Login'], 'next': ['/history/']}\r\npath: /login/\r\nmethod: POST\r\nremote_ip: 194.210.190.12\r\nView Log(Errors)\r\n```\n", "before_files": [{"content": "# coding=utf-8\n\nimport logging\n\nfrom medusa import app, common\nfrom medusa.logger.adapters.style import BraceAdapter\n\nfrom pynma import pynma\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nclass Notifier(object):\n def test_notify(self, nma_api, nma_priority):\n return self._sendNMA(nma_api, nma_priority, event='Test', message='Testing NMA settings from 
Medusa',\n force=True)\n\n def notify_snatch(self, ep_name, is_proper):\n if app.NMA_NOTIFY_ONSNATCH:\n self._sendNMA(nma_api=None, nma_priority=None, event=common.notifyStrings[(common.NOTIFY_SNATCH, common.NOTIFY_SNATCH_PROPER)[is_proper]],\n message=ep_name)\n\n def notify_download(self, ep_name):\n if app.NMA_NOTIFY_ONDOWNLOAD:\n self._sendNMA(nma_api=None, nma_priority=None, event=common.notifyStrings[common.NOTIFY_DOWNLOAD],\n message=ep_name)\n\n def notify_subtitle_download(self, ep_name, lang):\n if app.NMA_NOTIFY_ONSUBTITLEDOWNLOAD:\n self._sendNMA(nma_api=None, nma_priority=None, event=common.notifyStrings[common.NOTIFY_SUBTITLE_DOWNLOAD],\n message=ep_name + ': ' + lang)\n\n def notify_git_update(self, new_version='??'):\n if app.USE_NMA:\n update_text = common.notifyStrings[common.NOTIFY_GIT_UPDATE_TEXT]\n title = common.notifyStrings[common.NOTIFY_GIT_UPDATE]\n self._sendNMA(nma_api=None, nma_priority=None, event=title, message=update_text + new_version)\n\n def notify_login(self, ipaddress=''):\n if app.USE_NMA:\n update_text = common.notifyStrings[common.NOTIFY_LOGIN_TEXT]\n title = common.notifyStrings[common.NOTIFY_LOGIN]\n self._sendNMA(nma_api=None, nma_priority=None, event=title, message=update_text.format(ipaddress))\n\n def _sendNMA(self, nma_api=None, nma_priority=None, event=None, message=None, force=False):\n\n title = 'Medusa'\n\n if not app.USE_NMA and not force:\n return False\n\n if nma_api is None:\n nma_api = app.NMA_API\n else:\n nma_api = nma_api.split(',')\n\n if nma_priority is None:\n nma_priority = app.NMA_PRIORITY\n\n batch = False\n\n p = pynma.PyNMA()\n keys = nma_api\n p.addkey(keys)\n\n if len(keys) > 1:\n batch = True\n\n log.debug(u'NMA: Sending notice with details: event=\"{0}, message=\"{1}\", priority={2}, batch={3}',\n event, message, nma_priority, batch)\n response = p.push(application=title, event=event, description=message, priority=nma_priority, batch_mode=batch)\n\n if not response[nma_api][u'code'] == u'200':\n log.error(u'Could not send notification to NotifyMyAndroid')\n return False\n else:\n log.info(u'NMA: Notification sent to NotifyMyAndroid')\n return True\n", "path": "medusa/notifiers/nma.py"}]} | 2,076 | 304 |
gh_patches_debug_16829 | rasdani/github-patches | git_diff | pyload__pyload-1412 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Smoozed
Hello,
Smoozed is free at the moment for 10GB a day, but the hook says that it is not a premium account.

Would be great if somebody could have a look at it.
Thanks
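A guess at what is happening, sketched with made-up numbers — the field handling follows the plugin code quoted below, but the values are hypothetical: an account without a paid period in the future is flagged as non-premium even while the free daily quota applies.

```python
import time

# Hypothetical status payload for a free Smoozed account.
user_premium = 0.0                    # no (or expired) paid premium timestamp
validuntil = float(user_premium)

premium = validuntil >= time.time()   # False -> hook reports "not a premium account"
print(premium)
```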
</issue>
<code>
[start of module/plugins/accounts/SmoozedCom.py]
1 # -*- coding: utf-8 -*-
2
3 import hashlib
4 import time
5
6 try:
7 from beaker.crypto.pbkdf2 import PBKDF2
8
9 except ImportError:
10 from beaker.crypto.pbkdf2 import pbkdf2
11 from binascii import b2a_hex
12
13 class PBKDF2(object):
14 def __init__(self, passphrase, salt, iterations=1000):
15 self.passphrase = passphrase
16 self.salt = salt
17 self.iterations = iterations
18
19 def hexread(self, octets):
20 return b2a_hex(pbkdf2(self.passphrase, self.salt, self.iterations, octets))
21
22 from module.common.json_layer import json_loads
23 from module.plugins.Account import Account
24
25
26 class SmoozedCom(Account):
27 __name__ = "SmoozedCom"
28 __type__ = "account"
29 __version__ = "0.04"
30
31 __description__ = """Smoozed.com account plugin"""
32 __license__ = "GPLv3"
33 __authors__ = [("", "")]
34
35
36 def loadAccountInfo(self, user, req):
37 # Get user data from premiumize.me
38 status = self.getAccountStatus(user, req)
39
40 self.logDebug(status)
41
42 if status['state'] != 'ok':
43 info = {'validuntil' : None,
44 'trafficleft': None,
45 'premium' : False}
46 else:
47 # Parse account info
48 info = {'validuntil' : float(status["data"]["user"]["user_premium"]),
49 'trafficleft': max(0, status["data"]["traffic"][1] - status["data"]["traffic"][0]),
50 'session' : status["data"]["session_key"],
51 'hosters' : [hoster["name"] for hoster in status["data"]["hoster"]]}
52
53 if info['validuntil'] < time.time():
54 info['premium'] = False
55 else:
56 info['premium'] = True
57
58 return info
59
60
61 def login(self, user, data, req):
62 # Get user data from premiumize.me
63 status = self.getAccountStatus(user, req)
64
65 # Check if user and password are valid
66 if status['state'] != 'ok':
67 self.wrongPassword()
68
69
70 def getAccountStatus(self, user, req):
71 password = self.getAccountData(user)['password']
72 salt = hashlib.sha256(password).hexdigest()
73 encrypted = PBKDF2(password, salt, iterations=1000).hexread(32)
74
75 return json_loads(req.load("http://www2.smoozed.com/api/login",
76 get={'auth': user, 'password': encrypted}))
77
[end of module/plugins/accounts/SmoozedCom.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/module/plugins/accounts/SmoozedCom.py b/module/plugins/accounts/SmoozedCom.py
--- a/module/plugins/accounts/SmoozedCom.py
+++ b/module/plugins/accounts/SmoozedCom.py
@@ -34,7 +34,6 @@
def loadAccountInfo(self, user, req):
- # Get user data from premiumize.me
status = self.getAccountStatus(user, req)
self.logDebug(status)
@@ -51,7 +50,10 @@
'hosters' : [hoster["name"] for hoster in status["data"]["hoster"]]}
if info['validuntil'] < time.time():
- info['premium'] = False
+ if float(status["data"]["user"].get("user_trial", 0)) > time.time():
+ info['premium'] = True
+ else:
+ info['premium'] = False
else:
info['premium'] = True
| {"golden_diff": "diff --git a/module/plugins/accounts/SmoozedCom.py b/module/plugins/accounts/SmoozedCom.py\n--- a/module/plugins/accounts/SmoozedCom.py\n+++ b/module/plugins/accounts/SmoozedCom.py\n@@ -34,7 +34,6 @@\n \n \n def loadAccountInfo(self, user, req):\n- # Get user data from premiumize.me\n status = self.getAccountStatus(user, req)\n \n self.logDebug(status)\n@@ -51,7 +50,10 @@\n 'hosters' : [hoster[\"name\"] for hoster in status[\"data\"][\"hoster\"]]}\n \n if info['validuntil'] < time.time():\n- info['premium'] = False\n+ if float(status[\"data\"][\"user\"].get(\"user_trial\", 0)) > time.time():\n+ info['premium'] = True\n+ else:\n+ info['premium'] = False\n else:\n info['premium'] = True\n", "issue": "Smoozed\nHello,\n\nSmooth is free in the moment for 10GB a day but the hooks said that it is not a premium account.\n\nWould be great if somebody can have a look on it.\n\nThanks\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport hashlib\nimport time\n\ntry:\n from beaker.crypto.pbkdf2 import PBKDF2\n\nexcept ImportError:\n from beaker.crypto.pbkdf2 import pbkdf2\n from binascii import b2a_hex\n\n class PBKDF2(object):\n def __init__(self, passphrase, salt, iterations=1000):\n self.passphrase = passphrase\n self.salt = salt\n self.iterations = iterations\n\n def hexread(self, octets):\n return b2a_hex(pbkdf2(self.passphrase, self.salt, self.iterations, octets))\n\nfrom module.common.json_layer import json_loads\nfrom module.plugins.Account import Account\n\n\nclass SmoozedCom(Account):\n __name__ = \"SmoozedCom\"\n __type__ = \"account\"\n __version__ = \"0.04\"\n\n __description__ = \"\"\"Smoozed.com account plugin\"\"\"\n __license__ = \"GPLv3\"\n __authors__ = [(\"\", \"\")]\n\n\n def loadAccountInfo(self, user, req):\n # Get user data from premiumize.me\n status = self.getAccountStatus(user, req)\n\n self.logDebug(status)\n\n if status['state'] != 'ok':\n info = {'validuntil' : None,\n 'trafficleft': None,\n 'premium' : False}\n else:\n # Parse account info\n info = {'validuntil' : float(status[\"data\"][\"user\"][\"user_premium\"]),\n 'trafficleft': max(0, status[\"data\"][\"traffic\"][1] - status[\"data\"][\"traffic\"][0]),\n 'session' : status[\"data\"][\"session_key\"],\n 'hosters' : [hoster[\"name\"] for hoster in status[\"data\"][\"hoster\"]]}\n\n if info['validuntil'] < time.time():\n info['premium'] = False\n else:\n info['premium'] = True\n\n return info\n\n\n def login(self, user, data, req):\n # Get user data from premiumize.me\n status = self.getAccountStatus(user, req)\n\n # Check if user and password are valid\n if status['state'] != 'ok':\n self.wrongPassword()\n\n\n def getAccountStatus(self, user, req):\n password = self.getAccountData(user)['password']\n salt = hashlib.sha256(password).hexdigest()\n encrypted = PBKDF2(password, salt, iterations=1000).hexread(32)\n\n return json_loads(req.load(\"http://www2.smoozed.com/api/login\",\n get={'auth': user, 'password': encrypted}))\n", "path": "module/plugins/accounts/SmoozedCom.py"}]} | 1,333 | 214 |
gh_patches_debug_6986 | rasdani/github-patches | git_diff | Lightning-AI__torchmetrics-1210 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
MAE can't handle MPS backend
## 🐛 Bug
Currently, the mean absolute error can't handle the MPS backend. This is a simple fix that just requires casting to `.float()`.
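As a rough sketch of that direction — the helper name and the exact casts are assumptions based on this report, not the repository's actual change:

```python
import torch

def _mean_absolute_error_update_sketch(preds: torch.Tensor, target: torch.Tensor):
    # Promote integral inputs so abs() is supported on the MPS backend.
    if not preds.is_floating_point():
        preds = preds.float()
    if not target.is_floating_point():
        target = target.float()
    sum_abs_error = torch.sum(torch.abs(preds - target))
    return sum_abs_error, target.numel()
```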
### To Reproduce
This works:
```python
import torchmetrics
import torch
a = torch.tensor([1, 2, 3]).to(torch.device("mps"))
acc = torchmetrics.Accuracy().to(torch.device("mps"))
acc.update(a, a)
acc.compute()
```
This also works:
```python
a = torch.tensor([1, 2, 3])
mae = torchmetrics.MeanAbsoluteError()
mae.update(a, a)
mae.compute()
```
But this crashes:
```python
a = torch.tensor([1, 2, 3]).to(torch.device("mps"))
mae = torchmetrics.MeanAbsoluteError().to(torch.device("mps"))
mae.update(a, a)
mae.compute()
```
```
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
Input In [12], in <module>
1 a = torch.tensor([1, 2, 3]).to(torch.device("mps"))
3 acc = torchmetrics.MeanAbsoluteError().to(torch.device("mps"))
----> 4 acc.update(a, a)
5 acc.compute()
File ~/miniforge3/lib/python3.9/site-packages/torchmetrics/metric.py:391, in Metric._wrap_update.<locals>.wrapped_func(*args, **kwargs)
389 with torch.set_grad_enabled(self._enable_grad):
390 try:
--> 391 update(*args, **kwargs)
392 except RuntimeError as err:
393 if "Expected all tensors to be on" in str(err):
File ~/miniforge3/lib/python3.9/site-packages/torchmetrics/regression/mae.py:63, in MeanAbsoluteError.update(self, preds, target)
56 def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore
57 """Update state with predictions and targets.
58
59 Args:
60 preds: Predictions from model
61 target: Ground truth values
62 """
---> 63 sum_abs_error, n_obs = _mean_absolute_error_update(preds, target)
65 self.sum_abs_error += sum_abs_error
66 self.total += n_obs
File ~/miniforge3/lib/python3.9/site-packages/torchmetrics/functional/regression/mae.py:33, in _mean_absolute_error_update(preds, target)
23 """Updates and returns variables required to compute Mean Absolute Error.
24
25 Checks for same shape of input tensors.
(...)
29 target: Ground truth tensor
30 """
32 _check_same_shape(preds, target)
---> 33 sum_abs_error = torch.sum(torch.abs(preds - target))
34 n_obs = target.numel()
35 return sum_abs_error, n_obs
TypeError: Operation 'abs_out_mps()' does not support input type 'int64' in MPS backend.
```
### Environment
```
torch : 1.12.1
lightning : 2022.9.8
torchmetrics: 0.9.3
```
</issue>
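
A minimal user-side workaround, assuming a PyTorch build with MPS available, is to cast the integer tensors to a floating dtype before updating the metric; the snippet below is illustrative only:

```python
import torch
import torchmetrics

device = torch.device("mps")
a = torch.tensor([1, 2, 3], device=device)

mae = torchmetrics.MeanAbsoluteError().to(device)
# abs() on int64 is not supported by the MPS backend, so cast to float first
mae.update(a.float(), a.float())
print(mae.compute())  # zero error, since preds == target
```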
<code>
[start of src/torchmetrics/functional/regression/mae.py]
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from typing import Tuple
15
16 import torch
17 from torch import Tensor
18
19 from torchmetrics.utilities.checks import _check_same_shape
20
21
22 def _mean_absolute_error_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, int]:
23 """Updates and returns variables required to compute Mean Absolute Error.
24
25 Checks for same shape of input tensors.
26
27 Args:
28 preds: Predicted tensor
29 target: Ground truth tensor
30 """
31
32 _check_same_shape(preds, target)
33 sum_abs_error = torch.sum(torch.abs(preds - target))
34 n_obs = target.numel()
35 return sum_abs_error, n_obs
36
37
38 def _mean_absolute_error_compute(sum_abs_error: Tensor, n_obs: int) -> Tensor:
39 """Computes Mean Absolute Error.
40
41 Args:
42 sum_abs_error: Sum of absolute value of errors over all observations
43 n_obs: Number of predictions or observations
44
45 Example:
46 >>> preds = torch.tensor([0., 1, 2, 3])
47 >>> target = torch.tensor([0., 1, 2, 2])
48 >>> sum_abs_error, n_obs = _mean_absolute_error_update(preds, target)
49 >>> _mean_absolute_error_compute(sum_abs_error, n_obs)
50 tensor(0.2500)
51 """
52
53 return sum_abs_error / n_obs
54
55
56 def mean_absolute_error(preds: Tensor, target: Tensor) -> Tensor:
57 """Computes mean absolute error.
58
59 Args:
60 preds: estimated labels
61 target: ground truth labels
62
63 Return:
64 Tensor with MAE
65
66 Example:
67 >>> from torchmetrics.functional import mean_absolute_error
68 >>> x = torch.tensor([0., 1, 2, 3])
69 >>> y = torch.tensor([0., 1, 2, 2])
70 >>> mean_absolute_error(x, y)
71 tensor(0.2500)
72 """
73 sum_abs_error, n_obs = _mean_absolute_error_update(preds, target)
74 return _mean_absolute_error_compute(sum_abs_error, n_obs)
75
[end of src/torchmetrics/functional/regression/mae.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/torchmetrics/functional/regression/mae.py b/src/torchmetrics/functional/regression/mae.py
--- a/src/torchmetrics/functional/regression/mae.py
+++ b/src/torchmetrics/functional/regression/mae.py
@@ -28,8 +28,9 @@
preds: Predicted tensor
target: Ground truth tensor
"""
-
_check_same_shape(preds, target)
+ preds = preds if preds.is_floating_point else preds.float()
+ target = target if target.is_floating_point else target.float()
sum_abs_error = torch.sum(torch.abs(preds - target))
n_obs = target.numel()
return sum_abs_error, n_obs
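
For readers adapting this elsewhere, the same dtype-guard idea can be sketched as a standalone helper, written here with `is_floating_point()` invoked as a method call; the names are illustrative, not the library's API:

```python
import torch
from torch import Tensor


def _as_float(t: Tensor) -> Tensor:
    # Integer tensors trip abs() on the MPS backend, so promote them to float
    return t if t.is_floating_point() else t.float()


def sum_abs_error(preds: Tensor, target: Tensor) -> Tensor:
    preds, target = _as_float(preds), _as_float(target)
    return torch.sum(torch.abs(preds - target))
```
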
| {"golden_diff": "diff --git a/src/torchmetrics/functional/regression/mae.py b/src/torchmetrics/functional/regression/mae.py\n--- a/src/torchmetrics/functional/regression/mae.py\n+++ b/src/torchmetrics/functional/regression/mae.py\n@@ -28,8 +28,9 @@\n preds: Predicted tensor\n target: Ground truth tensor\n \"\"\"\n-\n _check_same_shape(preds, target)\n+ preds = preds if preds.is_floating_point else preds.float()\n+ target = target if target.is_floating_point else target.float()\n sum_abs_error = torch.sum(torch.abs(preds - target))\n n_obs = target.numel()\n return sum_abs_error, n_obs\n", "issue": "MAE can't handle MPS backend\n## \ud83d\udc1b Bug\r\n\r\nThe currently, the mean absolute error can't handle the MPS backend. This is a simple fix and just requires casting to .float()\r\n\r\n### To Reproduce\r\n\r\nThis works:\r\n\r\n```python\r\nimport torchmetrics\r\nimport torch\r\n\r\na = torch.tensor([1, 2, 3]).to(torch.device(\"mps\"))\r\n\r\nacc = torchmetrics.Accuracy().to(torch.device(\"mps\"))\r\nacc.update(a, a)\r\nacc.compute()\r\n```\r\n\r\nthis also works:\r\n\r\n```\r\na = torch.tensor([1, 2, 3])\r\n\r\nmae = torchmetrics.MeanAbsoluteError()\r\nmae.update(a, a)\r\nmae.compute()\r\n\r\n```\r\n\r\nbut this crashes\r\n\r\n```python\r\na = torch.tensor([1, 2, 3]).to(torch.device(\"mps\"))\r\n\r\nmae = torchmetrics.MeanAbsoluteError().to(torch.device(\"mps\"))\r\nmae.update(a, a)\r\nmae.compute()\r\n```\r\n\r\n```\r\n---------------------------------------------------------------------------\r\nTypeError Traceback (most recent call last)\r\nInput In [12], in <module>\r\n 1 a = torch.tensor([1, 2, 3]).to(torch.device(\"mps\"))\r\n 3 acc = torchmetrics.MeanAbsoluteError().to(torch.device(\"mps\"))\r\n----> 4 acc.update(a, a)\r\n 5 acc.compute()\r\n\r\nFile ~/miniforge3/lib/python3.9/site-packages/torchmetrics/metric.py:391, in Metric._wrap_update.<locals>.wrapped_func(*args, **kwargs)\r\n 389 with torch.set_grad_enabled(self._enable_grad):\r\n 390 try:\r\n--> 391 update(*args, **kwargs)\r\n 392 except RuntimeError as err:\r\n 393 if \"Expected all tensors to be on\" in str(err):\r\n\r\nFile ~/miniforge3/lib/python3.9/site-packages/torchmetrics/regression/mae.py:63, in MeanAbsoluteError.update(self, preds, target)\r\n 56 def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore\r\n 57 \"\"\"Update state with predictions and targets.\r\n 58 \r\n 59 Args:\r\n 60 preds: Predictions from model\r\n 61 target: Ground truth values\r\n 62 \"\"\"\r\n---> 63 sum_abs_error, n_obs = _mean_absolute_error_update(preds, target)\r\n 65 self.sum_abs_error += sum_abs_error\r\n 66 self.total += n_obs\r\n\r\nFile ~/miniforge3/lib/python3.9/site-packages/torchmetrics/functional/regression/mae.py:33, in _mean_absolute_error_update(preds, target)\r\n 23 \"\"\"Updates and returns variables required to compute Mean Absolute Error.\r\n 24 \r\n 25 Checks for same shape of input tensors.\r\n (...)\r\n 29 target: Ground truth tensor\r\n 30 \"\"\"\r\n 32 _check_same_shape(preds, target)\r\n---> 33 sum_abs_error = torch.sum(torch.abs(preds - target))\r\n 34 n_obs = target.numel()\r\n 35 return sum_abs_error, n_obs\r\n\r\nTypeError: Operation 'abs_out_mps()' does not support input type 'int64' in MPS backend.\r\n```\r\n\r\n\r\n### Environment\r\n\r\n```\r\ntorch : 1.12.1\r\nlightning : 2022.9.8\r\ntorchmetrics: 0.9.3\r\n```\r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file 
except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Tuple\n\nimport torch\nfrom torch import Tensor\n\nfrom torchmetrics.utilities.checks import _check_same_shape\n\n\ndef _mean_absolute_error_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, int]:\n \"\"\"Updates and returns variables required to compute Mean Absolute Error.\n\n Checks for same shape of input tensors.\n\n Args:\n preds: Predicted tensor\n target: Ground truth tensor\n \"\"\"\n\n _check_same_shape(preds, target)\n sum_abs_error = torch.sum(torch.abs(preds - target))\n n_obs = target.numel()\n return sum_abs_error, n_obs\n\n\ndef _mean_absolute_error_compute(sum_abs_error: Tensor, n_obs: int) -> Tensor:\n \"\"\"Computes Mean Absolute Error.\n\n Args:\n sum_abs_error: Sum of absolute value of errors over all observations\n n_obs: Number of predictions or observations\n\n Example:\n >>> preds = torch.tensor([0., 1, 2, 3])\n >>> target = torch.tensor([0., 1, 2, 2])\n >>> sum_abs_error, n_obs = _mean_absolute_error_update(preds, target)\n >>> _mean_absolute_error_compute(sum_abs_error, n_obs)\n tensor(0.2500)\n \"\"\"\n\n return sum_abs_error / n_obs\n\n\ndef mean_absolute_error(preds: Tensor, target: Tensor) -> Tensor:\n \"\"\"Computes mean absolute error.\n\n Args:\n preds: estimated labels\n target: ground truth labels\n\n Return:\n Tensor with MAE\n\n Example:\n >>> from torchmetrics.functional import mean_absolute_error\n >>> x = torch.tensor([0., 1, 2, 3])\n >>> y = torch.tensor([0., 1, 2, 2])\n >>> mean_absolute_error(x, y)\n tensor(0.2500)\n \"\"\"\n sum_abs_error, n_obs = _mean_absolute_error_update(preds, target)\n return _mean_absolute_error_compute(sum_abs_error, n_obs)\n", "path": "src/torchmetrics/functional/regression/mae.py"}]} | 2,030 | 160 |
gh_patches_debug_22360 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-3243 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Links do not work after adding check constraint to a table
## Description
It appears that tables with any check constraint (e.g. on an unrelated column) break the display of all links on that table.
The foreign key constraint is held by the underlying database, and the links are shown in the link panel on the sidebar. However, in the grid the cells just show the raw value rather than a dropdown.
Another clue: clicking table > advanced > constraints shows the error 'Unable to fetch table constraints When making an XHR request, the server responded with an error, but the response body was not valid JSON.' Under the hood this corresponds with a 500 request to `/api/db/v0/tables/22/constraints/?limit=500`
<details>
<summary>
500 response body
</summary>
```
<!doctype html>
<html lang="en">
<head>
<title>Server Error (500)</title>
</head>
<body>
<h1>Server Error (500)</h1><p></p>
</body>
</html>
```
</details>
## To Reproduce
<!-- How can we recreate this bug? Please try to provide a Minimal, Complete, and Verifiable (http://stackoverflow.com/help/mcve) example if code-related. -->
Steps:
1. Add a link column to a table in Mathesar OR in another tool, add a foreign key constraint and refresh the schema in Mathesar
2. [at this point, if you view the table you should see the link as a dropdown]
3. In another tool, add a constraint to any column in the table
4. View the table in Mathesar: The links continue to display fine
5. Refresh the schema in Mathesar
6. View the table in Mathesar
Expected: The links display as before
Actual: The raw value of the links (e.g. the number for an integer id column) are shown
7. In another tool, remove the constraint
8. View the table in Mathesar: all the data will disappear. Adding a record also seems broken from this point, but under the hood the record is actually added.
9. Refresh the schema in Mathesar
10. View the table in Mathesar: all the data is present and link works correctly again
Video:
This is all mock data. Ignore the bit where I try to add person_id manually at the beginning; that's force of habit 😅. Also, I add a record in the external tool, but this is not necessary for reproduction (this is just meant to show the check constraint appears to work fine in other tools, and doesn't stop manipulating data there, so I don't think this is a problem with the other tool).
https://github.com/centerofci/mathesar/assets/4953590/044e5e4f-ddbc-4d77-8032-c08375ff9055
## Environment
- OS: Host: macOS, but mathesar 0.1.2 is running inside a [podman](https://podman.io/) container. I don't think this is relevant though.
- Browser: Google Chrome
- Browser Version: 115.0.5790.114 (Official Build) (arm64)
- Other info: Using postgres:14 container.
</issue>
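
For step 3 of the reproduction, the external check constraint can be added from any client; a hedged sketch using psycopg, where the DSN, table, and column names are placeholders:

```python
import psycopg

# Placeholder connection details; adjust for your Mathesar database
with psycopg.connect("dbname=mathesar_db user=mathesar") as conn:
    # Step 3: any check constraint on the linked table is enough to trigger the breakage
    conn.execute("ALTER TABLE person ADD CONSTRAINT person_age_positive CHECK (age >= 0)")
    # Step 7 later removes it again:
    # conn.execute("ALTER TABLE person DROP CONSTRAINT person_age_positive")
```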
<code>
[start of mathesar/api/serializers/constraints.py]
1 from psycopg.errors import DuplicateTable, UniqueViolation
2 from rest_framework import serializers, status
3
4 from db.constraints import utils as constraint_utils
5 from db.identifiers import is_identifier_too_long
6 from db.constraints.base import ForeignKeyConstraint, UniqueConstraint
7
8 import mathesar.api.exceptions.database_exceptions.exceptions as database_api_exceptions
9 from mathesar.api.exceptions.validation_exceptions.exceptions import (
10 ConstraintColumnEmptyAPIException, UnsupportedConstraintAPIException,
11 InvalidTableName
12 )
13 from mathesar.api.serializers.shared_serializers import (
14 MathesarPolymorphicErrorMixin,
15 ReadWritePolymorphicSerializerMappingMixin,
16 )
17 from mathesar.models.base import Column, Constraint, Table
18
19
20 class TableFilteredPrimaryKeyRelatedField(serializers.PrimaryKeyRelatedField):
21 """
22 Limits the accepted related primary key values to a specific table.
23 For example, if the PrimaryKeyRelatedField is instantiated with a
24 Column queryset, only columns in the "associated table" are
25 accepted. The "associated table" is defined by the context dict's
26 `table_id` value.
27 """
28 def get_queryset(self):
29 table_id = self.context.get('table_id', None)
30 queryset = super(TableFilteredPrimaryKeyRelatedField, self).get_queryset()
31 if table_id is None or not queryset:
32 return None
33 return queryset.filter(table__id=table_id)
34
35
36 class BaseConstraintSerializer(serializers.ModelSerializer):
37 name = serializers.CharField(required=False)
38 type = serializers.CharField()
39 columns = TableFilteredPrimaryKeyRelatedField(queryset=Column.current_objects, many=True)
40
41 class Meta:
42 model = Constraint
43 fields = ['id', 'name', 'type', 'columns']
44
45 def construct_constraint_obj(self, table, data):
46 columns_attnum = [column.attnum for column in data.get('columns')]
47 if data.get('type') == constraint_utils.ConstraintType.UNIQUE.value:
48 return UniqueConstraint(data.get('name', None), table.oid, columns_attnum)
49 return None
50
51 def create(self, validated_data):
52 table = self.context['table']
53 constraint_obj = self.construct_constraint_obj(table, validated_data)
54 # Additional check is needed because we support read operations for primary key constraint,
55 # but we don't support write operations
56 if constraint_obj is None:
57 constraint_type = validated_data.get('type', None)
58 raise UnsupportedConstraintAPIException(constraint_type=constraint_type, field='type')
59 try:
60 constraint = table.add_constraint(constraint_obj)
61 except DuplicateTable as e:
62 raise database_api_exceptions.DuplicateTableAPIException(
63 e,
64 message='Relation with the same name already exists',
65 status_code=status.HTTP_400_BAD_REQUEST
66 )
67 except UniqueViolation as e:
68 raise database_api_exceptions.UniqueViolationAPIException(
69 e,
70 status_code=status.HTTP_400_BAD_REQUEST
71 )
72 return constraint
73
74 def validate_name(self, name):
75 if is_identifier_too_long(name):
76 raise database_api_exceptions.IdentifierTooLong(field='name')
77 return name
78
79
80 class ForeignKeyConstraintSerializer(BaseConstraintSerializer):
81 class Meta:
82 model = Constraint
83 fields = BaseConstraintSerializer.Meta.fields + [
84 'referent_columns',
85 'referent_table',
86 'onupdate',
87 'ondelete',
88 'deferrable',
89 'match'
90 ]
91
92 referent_columns = serializers.PrimaryKeyRelatedField(queryset=Column.current_objects.all(), many=True)
93 referent_table = serializers.SerializerMethodField()
94 onupdate = serializers.ChoiceField(
95 choices=['RESTRICT', 'CASCADE', 'SET NULL', 'NO ACTION', 'SET DEFAULT'],
96 required=False,
97 allow_null=True
98 )
99 ondelete = serializers.ChoiceField(
100 choices=['RESTRICT', 'CASCADE', 'SET NULL', 'NO ACTION', 'SET DEFAULT'],
101 required=False,
102 allow_null=True
103 )
104 deferrable = serializers.BooleanField(allow_null=True, required=False)
105 match = serializers.ChoiceField(choices=['SIMPLE', 'PARTIAL', 'FULL'], allow_null=True, required=False)
106
107 def get_referent_table(self, obj):
108 return obj.referent_columns[0].table.id
109
110 def construct_constraint_obj(self, table, data):
111 columns_attnum = [column.attnum for column in data.get('columns')]
112 referent_columns = data.get('referent_columns')
113 referent_columns_attnum = [column.attnum for column in referent_columns]
114 constraint_options_fields = ['onupdate', 'ondelete', 'deferrable']
115 constraint_options = {
116 constraint_options_field: data[constraint_options_field]
117 for constraint_options_field in constraint_options_fields if constraint_options_field in data
118 }
119 return ForeignKeyConstraint(
120 data.get('name', None),
121 table.oid,
122 columns_attnum,
123 referent_columns[0].table.oid,
124 referent_columns_attnum,
125 constraint_options
126 )
127
128
129 class ConstraintSerializer(
130 ReadWritePolymorphicSerializerMappingMixin,
131 MathesarPolymorphicErrorMixin,
132 serializers.ModelSerializer
133 ):
134 class Meta:
135 model = Constraint
136 fields = '__all__'
137
138 serializers_mapping = {
139 'foreignkey': ForeignKeyConstraintSerializer,
140 'primary': BaseConstraintSerializer,
141 'unique': BaseConstraintSerializer,
142 }
143
144 def get_mapping_field(self, data):
145 if isinstance(data, Constraint):
146 constraint_type = data.type
147 else:
148 constraint_type = data.get('type', None)
149 assert constraint_type is not None
150 return constraint_type
151
152 def create(self, validated_data):
153 serializer = self.get_serializer_class(self.get_mapping_field(validated_data))
154 return serializer.create(validated_data)
155
156 def run_validation(self, data):
157 if referent_table := data.get('referent_table', None):
158 referent_table_name = Table.current_objects.get(id=referent_table).name
159 if any(
160 invalid_char in referent_table_name
161 for invalid_char in ('(', ')')
162 ):
163 raise InvalidTableName(
164 referent_table_name,
165 field='referent_table'
166 )
167 constraint_type = data.get('type', None)
168 if constraint_type not in self.serializers_mapping.keys():
169 raise UnsupportedConstraintAPIException(constraint_type=constraint_type)
170 columns = data.get('columns', None)
171 if columns == []:
172 raise ConstraintColumnEmptyAPIException(field='columns')
173 return super(ConstraintSerializer, self).run_validation(data)
174
[end of mathesar/api/serializers/constraints.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mathesar/api/serializers/constraints.py b/mathesar/api/serializers/constraints.py
--- a/mathesar/api/serializers/constraints.py
+++ b/mathesar/api/serializers/constraints.py
@@ -139,6 +139,10 @@
'foreignkey': ForeignKeyConstraintSerializer,
'primary': BaseConstraintSerializer,
'unique': BaseConstraintSerializer,
+ # Even though 'check' & 'exclude' constraints are currently unsupported it's added here
+ # so that the app doesn't break in case these constraints are already present.
+ 'check': BaseConstraintSerializer,
+ 'exclude': BaseConstraintSerializer
}
def get_mapping_field(self, data):
@@ -165,7 +169,7 @@
field='referent_table'
)
constraint_type = data.get('type', None)
- if constraint_type not in self.serializers_mapping.keys():
+ if constraint_type not in ('foreignkey', 'primary', 'unique'):
raise UnsupportedConstraintAPIException(constraint_type=constraint_type)
columns = data.get('columns', None)
if columns == []:
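
In rough terms, the behaviour this patch aims for, simplified here with the serializer classes stubbed out as strings, is that listing an existing check or exclude constraint resolves to a serializer while creating one through the API is still rejected:

```python
serializers_mapping = {
    'foreignkey': 'ForeignKeyConstraintSerializer',
    'primary': 'BaseConstraintSerializer',
    'unique': 'BaseConstraintSerializer',
    'check': 'BaseConstraintSerializer',    # read-only support
    'exclude': 'BaseConstraintSerializer',  # read-only support
}

def serializer_for(constraint_type):
    # Reading constraints that already exist in the database now succeeds
    return serializers_mapping[constraint_type]

def validate_create(constraint_type):
    # Creation via the API stays limited to the three supported types
    if constraint_type not in ('foreignkey', 'primary', 'unique'):
        raise ValueError(f"Unsupported constraint type: {constraint_type}")
```
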
| {"golden_diff": "diff --git a/mathesar/api/serializers/constraints.py b/mathesar/api/serializers/constraints.py\n--- a/mathesar/api/serializers/constraints.py\n+++ b/mathesar/api/serializers/constraints.py\n@@ -139,6 +139,10 @@\n 'foreignkey': ForeignKeyConstraintSerializer,\n 'primary': BaseConstraintSerializer,\n 'unique': BaseConstraintSerializer,\n+ # Even though 'check' & 'exclude' constraints are currently unsupported it's added here\n+ # so that the app doesn't break in case these constraints are already present.\n+ 'check': BaseConstraintSerializer,\n+ 'exclude': BaseConstraintSerializer\n }\n \n def get_mapping_field(self, data):\n@@ -165,7 +169,7 @@\n field='referent_table'\n )\n constraint_type = data.get('type', None)\n- if constraint_type not in self.serializers_mapping.keys():\n+ if constraint_type not in ('foreignkey', 'primary', 'unique'):\n raise UnsupportedConstraintAPIException(constraint_type=constraint_type)\n columns = data.get('columns', None)\n if columns == []:\n", "issue": "Links do not work after adding check constraint to a table\n## Description\r\n\r\nIt appears that tables with any check constraint (e.g. on an unrelated column) breaks the display of all links on that table.\r\n\r\nThe foreign key constraint is held by the underlying database, and the links are shown in the link panel on the sidebar. However, in the grid the cells just show the raw value rather than a dropdown.\r\n\r\nAnother clue: clicking table > advanced > constraints results shows the error 'Unable to fetch table constraints When making an XHR request, the server responded with an error, but the response body was not valid JSON.' Under the hood this corresponds with a 500 request to `/api/db/v0/tables/22/constraints/?limit=500`\r\n\r\n<details>\r\n<summary>\r\n500 response body\r\n</summary>\r\n\r\n```\r\n\r\n<!doctype html>\r\n<html lang=\"en\">\r\n<head>\r\n <title>Server Error (500)</title>\r\n</head>\r\n<body>\r\n <h1>Server Error (500)</h1><p></p>\r\n</body>\r\n</html>\r\n```\r\n\r\n</details>\r\n\r\n## To Reproduce\r\n<!-- How can we recreate this bug? Please try to provide a Minimal, Complete, and Verifiable (http://stackoverflow.com/help/mcve) example if code-related. -->\r\n\r\nSteps:\r\n1. Add a link column to a table in Mathesar OR in another tool, add a foreign key constraint and refresh the schema in Mathesar\r\n2. [at this point, if you view the table you should see the link as a dropdown]\r\n3. In another tool, add a constraint to any column in the table\r\n4. View the table in Mathesar: The links continue to display fine\r\n5. Refresh the schema in Mathesar\r\n6. View the table in Mathesar\r\n\r\nExpected: The links display as before\r\nActual: The raw value of the links (e.g. the number for an integer id column) are shown\r\n\r\n7. In another tool, remove the constraint\r\n8. View the table in Mathesar: all the data will disappear. Adding a record also seems broken from this point, but under the hood the record is actually added.\r\n9. Refresh the schema in Mathesar\r\n10. View the table in Mathesar: all the data is present and link works correctly again\r\n\r\nVideo:\r\n\r\nThis is all mock data. Ignore bit where I try to add person_id manually at the beginning, this is force of habit \ud83d\ude05. 
Also, I add a record in the external tool but this is not necessary for reproduction (this is just meant to show the check constraint appears to work fine in other tools, and doesn't stop manipulating data there, so I don't think this is a problem with the other tool).\r\n\r\nhttps://github.com/centerofci/mathesar/assets/4953590/044e5e4f-ddbc-4d77-8032-c08375ff9055\r\n\r\n## Environment\r\n\r\n - OS: Host: macOS, but mathesar 0.1.2 is running inside a [podman](https://podman.io/) container. I don't think this is relevant though.\r\n - Browser: Google Chrome\r\n - Browser Version: 115.0.5790.114 (Official Build) (arm64)\r\n - Other info: Using postgres:14 container.\n", "before_files": [{"content": "from psycopg.errors import DuplicateTable, UniqueViolation\nfrom rest_framework import serializers, status\n\nfrom db.constraints import utils as constraint_utils\nfrom db.identifiers import is_identifier_too_long\nfrom db.constraints.base import ForeignKeyConstraint, UniqueConstraint\n\nimport mathesar.api.exceptions.database_exceptions.exceptions as database_api_exceptions\nfrom mathesar.api.exceptions.validation_exceptions.exceptions import (\n ConstraintColumnEmptyAPIException, UnsupportedConstraintAPIException,\n InvalidTableName\n)\nfrom mathesar.api.serializers.shared_serializers import (\n MathesarPolymorphicErrorMixin,\n ReadWritePolymorphicSerializerMappingMixin,\n)\nfrom mathesar.models.base import Column, Constraint, Table\n\n\nclass TableFilteredPrimaryKeyRelatedField(serializers.PrimaryKeyRelatedField):\n \"\"\"\n Limits the accepted related primary key values to a specific table.\n For example, if the PrimaryKeyRelatedField is instantiated with a\n Column queryset, only columns in the \"associated table\" are\n accepted. The \"associated table\" is defined by the context dict's\n `table_id` value.\n \"\"\"\n def get_queryset(self):\n table_id = self.context.get('table_id', None)\n queryset = super(TableFilteredPrimaryKeyRelatedField, self).get_queryset()\n if table_id is None or not queryset:\n return None\n return queryset.filter(table__id=table_id)\n\n\nclass BaseConstraintSerializer(serializers.ModelSerializer):\n name = serializers.CharField(required=False)\n type = serializers.CharField()\n columns = TableFilteredPrimaryKeyRelatedField(queryset=Column.current_objects, many=True)\n\n class Meta:\n model = Constraint\n fields = ['id', 'name', 'type', 'columns']\n\n def construct_constraint_obj(self, table, data):\n columns_attnum = [column.attnum for column in data.get('columns')]\n if data.get('type') == constraint_utils.ConstraintType.UNIQUE.value:\n return UniqueConstraint(data.get('name', None), table.oid, columns_attnum)\n return None\n\n def create(self, validated_data):\n table = self.context['table']\n constraint_obj = self.construct_constraint_obj(table, validated_data)\n # Additional check is needed because we support read operations for primary key constraint,\n # but we don't support write operations\n if constraint_obj is None:\n constraint_type = validated_data.get('type', None)\n raise UnsupportedConstraintAPIException(constraint_type=constraint_type, field='type')\n try:\n constraint = table.add_constraint(constraint_obj)\n except DuplicateTable as e:\n raise database_api_exceptions.DuplicateTableAPIException(\n e,\n message='Relation with the same name already exists',\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except UniqueViolation as e:\n raise database_api_exceptions.UniqueViolationAPIException(\n e,\n status_code=status.HTTP_400_BAD_REQUEST\n )\n 
return constraint\n\n def validate_name(self, name):\n if is_identifier_too_long(name):\n raise database_api_exceptions.IdentifierTooLong(field='name')\n return name\n\n\nclass ForeignKeyConstraintSerializer(BaseConstraintSerializer):\n class Meta:\n model = Constraint\n fields = BaseConstraintSerializer.Meta.fields + [\n 'referent_columns',\n 'referent_table',\n 'onupdate',\n 'ondelete',\n 'deferrable',\n 'match'\n ]\n\n referent_columns = serializers.PrimaryKeyRelatedField(queryset=Column.current_objects.all(), many=True)\n referent_table = serializers.SerializerMethodField()\n onupdate = serializers.ChoiceField(\n choices=['RESTRICT', 'CASCADE', 'SET NULL', 'NO ACTION', 'SET DEFAULT'],\n required=False,\n allow_null=True\n )\n ondelete = serializers.ChoiceField(\n choices=['RESTRICT', 'CASCADE', 'SET NULL', 'NO ACTION', 'SET DEFAULT'],\n required=False,\n allow_null=True\n )\n deferrable = serializers.BooleanField(allow_null=True, required=False)\n match = serializers.ChoiceField(choices=['SIMPLE', 'PARTIAL', 'FULL'], allow_null=True, required=False)\n\n def get_referent_table(self, obj):\n return obj.referent_columns[0].table.id\n\n def construct_constraint_obj(self, table, data):\n columns_attnum = [column.attnum for column in data.get('columns')]\n referent_columns = data.get('referent_columns')\n referent_columns_attnum = [column.attnum for column in referent_columns]\n constraint_options_fields = ['onupdate', 'ondelete', 'deferrable']\n constraint_options = {\n constraint_options_field: data[constraint_options_field]\n for constraint_options_field in constraint_options_fields if constraint_options_field in data\n }\n return ForeignKeyConstraint(\n data.get('name', None),\n table.oid,\n columns_attnum,\n referent_columns[0].table.oid,\n referent_columns_attnum,\n constraint_options\n )\n\n\nclass ConstraintSerializer(\n ReadWritePolymorphicSerializerMappingMixin,\n MathesarPolymorphicErrorMixin,\n serializers.ModelSerializer\n):\n class Meta:\n model = Constraint\n fields = '__all__'\n\n serializers_mapping = {\n 'foreignkey': ForeignKeyConstraintSerializer,\n 'primary': BaseConstraintSerializer,\n 'unique': BaseConstraintSerializer,\n }\n\n def get_mapping_field(self, data):\n if isinstance(data, Constraint):\n constraint_type = data.type\n else:\n constraint_type = data.get('type', None)\n assert constraint_type is not None\n return constraint_type\n\n def create(self, validated_data):\n serializer = self.get_serializer_class(self.get_mapping_field(validated_data))\n return serializer.create(validated_data)\n\n def run_validation(self, data):\n if referent_table := data.get('referent_table', None):\n referent_table_name = Table.current_objects.get(id=referent_table).name\n if any(\n invalid_char in referent_table_name\n for invalid_char in ('(', ')')\n ):\n raise InvalidTableName(\n referent_table_name,\n field='referent_table'\n )\n constraint_type = data.get('type', None)\n if constraint_type not in self.serializers_mapping.keys():\n raise UnsupportedConstraintAPIException(constraint_type=constraint_type)\n columns = data.get('columns', None)\n if columns == []:\n raise ConstraintColumnEmptyAPIException(field='columns')\n return super(ConstraintSerializer, self).run_validation(data)\n", "path": "mathesar/api/serializers/constraints.py"}]} | 3,033 | 249 |
gh_patches_debug_30087 | rasdani/github-patches | git_diff | encode__uvicorn-724 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CVE-2020-7694 ANSI escape sequence injection
Looks like there is a [critical](https://vuldb.com/?id.159010) [security issue in "all versions" of uvicorn](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-7694).
I presume there is a typo in the original CVE description, so "ASNI" should be "ANSI".
</issue>
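
The injection vector here is a request path carrying raw ANSI escape bytes that get written verbatim to the terminal by the access logger; percent-encoding the path, which is what the patch in this record does via `urllib.parse.quote`, renders those bytes harmless. A small illustration with a hypothetical malicious path:

```python
import urllib.parse

# Hypothetical attacker-controlled request path containing ANSI escape sequences
malicious_path = "/\x1b[2J\x1b[31mfake-admin-login"

# Logged raw, these bytes would clear the operator's terminal and recolour output.
# Percent-encoded, they are just text:
print(urllib.parse.quote(malicious_path))
# -> /%1B%5B2J%1B%5B31mfake-admin-login
```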
<code>
[start of uvicorn/logging.py]
1 import http
2 import logging
3 import sys
4 from copy import copy
5
6 import click
7
8 TRACE_LOG_LEVEL = 5
9
10
11 class ColourizedFormatter(logging.Formatter):
12 """
13 A custom log formatter class that:
14
15 * Outputs the LOG_LEVEL with an appropriate color.
16 * If a log call includes an `extras={"color_message": ...}` it will be used
17 for formatting the output, instead of the plain text message.
18 """
19
20 level_name_colors = {
21 TRACE_LOG_LEVEL: lambda level_name: click.style(str(level_name), fg="blue"),
22 logging.DEBUG: lambda level_name: click.style(str(level_name), fg="cyan"),
23 logging.INFO: lambda level_name: click.style(str(level_name), fg="green"),
24 logging.WARNING: lambda level_name: click.style(str(level_name), fg="yellow"),
25 logging.ERROR: lambda level_name: click.style(str(level_name), fg="red"),
26 logging.CRITICAL: lambda level_name: click.style(
27 str(level_name), fg="bright_red"
28 ),
29 }
30
31 def __init__(self, fmt=None, datefmt=None, style="%", use_colors=None):
32 if use_colors in (True, False):
33 self.use_colors = use_colors
34 else:
35 self.use_colors = sys.stdout.isatty()
36 super().__init__(fmt=fmt, datefmt=datefmt, style=style)
37
38 def color_level_name(self, level_name, level_no):
39 default = lambda level_name: str(level_name)
40 func = self.level_name_colors.get(level_no, default)
41 return func(level_name)
42
43 def should_use_colors(self):
44 return True
45
46 def formatMessage(self, record):
47 recordcopy = copy(record)
48 levelname = recordcopy.levelname
49 seperator = " " * (8 - len(recordcopy.levelname))
50 if self.use_colors:
51 levelname = self.color_level_name(levelname, recordcopy.levelno)
52 if "color_message" in recordcopy.__dict__:
53 recordcopy.msg = recordcopy.__dict__["color_message"]
54 recordcopy.__dict__["message"] = recordcopy.getMessage()
55 recordcopy.__dict__["levelprefix"] = levelname + ":" + seperator
56 return super().formatMessage(recordcopy)
57
58
59 class DefaultFormatter(ColourizedFormatter):
60 def should_use_colors(self):
61 return sys.stderr.isatty()
62
63
64 class AccessFormatter(ColourizedFormatter):
65 status_code_colours = {
66 1: lambda code: click.style(str(code), fg="bright_white"),
67 2: lambda code: click.style(str(code), fg="green"),
68 3: lambda code: click.style(str(code), fg="yellow"),
69 4: lambda code: click.style(str(code), fg="red"),
70 5: lambda code: click.style(str(code), fg="bright_red"),
71 }
72
73 def get_client_addr(self, scope):
74 client = scope.get("client")
75 if not client:
76 return ""
77 return "%s:%d" % (client[0], client[1])
78
79 def get_path(self, scope):
80 return scope.get("root_path", "") + scope["path"]
81
82 def get_full_path(self, scope):
83 path = scope.get("root_path", "") + scope["path"]
84 query_string = scope.get("query_string", b"").decode("ascii")
85 if query_string:
86 return path + "?" + query_string
87 return path
88
89 def get_status_code(self, record):
90 status_code = record.__dict__["status_code"]
91 try:
92 status_phrase = http.HTTPStatus(status_code).phrase
93 except ValueError:
94 status_phrase = ""
95 status_and_phrase = "%s %s" % (status_code, status_phrase)
96
97 if self.use_colors:
98 default = lambda code: status_and_phrase
99 func = self.status_code_colours.get(status_code // 100, default)
100 return func(status_and_phrase)
101 return status_and_phrase
102
103 def formatMessage(self, record):
104 recordcopy = copy(record)
105 scope = recordcopy.__dict__["scope"]
106 method = scope["method"]
107 path = self.get_path(scope)
108 full_path = self.get_full_path(scope)
109 client_addr = self.get_client_addr(scope)
110 status_code = self.get_status_code(recordcopy)
111 http_version = scope["http_version"]
112 request_line = "%s %s HTTP/%s" % (method, full_path, http_version)
113 if self.use_colors:
114 request_line = click.style(request_line, bold=True)
115 recordcopy.__dict__.update(
116 {
117 "method": method,
118 "path": path,
119 "full_path": full_path,
120 "client_addr": client_addr,
121 "request_line": request_line,
122 "status_code": status_code,
123 "http_version": http_version,
124 }
125 )
126 return super().formatMessage(recordcopy)
127
[end of uvicorn/logging.py]
[start of uvicorn/protocols/utils.py]
1 import socket
2
3
4 def get_remote_addr(transport):
5 socket_info = transport.get_extra_info("socket")
6 if socket_info is not None:
7 try:
8 info = socket_info.getpeername()
9 except OSError:
10 # This case appears to inconsistently occur with uvloop
11 # bound to a unix domain socket.
12 family = None
13 info = None
14 else:
15 family = socket_info.family
16
17 if family in (socket.AF_INET, socket.AF_INET6):
18 return (str(info[0]), int(info[1]))
19 return None
20 info = transport.get_extra_info("peername")
21 if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:
22 return (str(info[0]), int(info[1]))
23 return None
24
25
26 def get_local_addr(transport):
27 socket_info = transport.get_extra_info("socket")
28 if socket_info is not None:
29 info = socket_info.getsockname()
30 family = socket_info.family
31 if family in (socket.AF_INET, socket.AF_INET6):
32 return (str(info[0]), int(info[1]))
33 return None
34 info = transport.get_extra_info("sockname")
35 if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:
36 return (str(info[0]), int(info[1]))
37 return None
38
39
40 def is_ssl(transport):
41 return bool(transport.get_extra_info("sslcontext"))
42
43
44 def get_client_addr(scope):
45 client = scope.get("client")
46 if not client:
47 return ""
48 return "%s:%d" % client
49
50
51 def get_path_with_query_string(scope):
52 path_with_query_string = scope.get("root_path", "") + scope["path"]
53 if scope["query_string"]:
54 path_with_query_string = "{}?{}".format(
55 path_with_query_string, scope["query_string"].decode("ascii")
56 )
57 return path_with_query_string
58
[end of uvicorn/protocols/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/uvicorn/logging.py b/uvicorn/logging.py
--- a/uvicorn/logging.py
+++ b/uvicorn/logging.py
@@ -1,6 +1,7 @@
import http
import logging
import sys
+import urllib
from copy import copy
import click
@@ -77,14 +78,14 @@
return "%s:%d" % (client[0], client[1])
def get_path(self, scope):
- return scope.get("root_path", "") + scope["path"]
+ return urllib.parse.quote(scope.get("root_path", "") + scope["path"])
def get_full_path(self, scope):
path = scope.get("root_path", "") + scope["path"]
query_string = scope.get("query_string", b"").decode("ascii")
if query_string:
- return path + "?" + query_string
- return path
+ return urllib.parse.quote(path) + "?" + query_string
+ return urllib.parse.quote(path)
def get_status_code(self, record):
status_code = record.__dict__["status_code"]
diff --git a/uvicorn/protocols/utils.py b/uvicorn/protocols/utils.py
--- a/uvicorn/protocols/utils.py
+++ b/uvicorn/protocols/utils.py
@@ -1,4 +1,5 @@
import socket
+import urllib
def get_remote_addr(transport):
@@ -49,7 +50,9 @@
def get_path_with_query_string(scope):
- path_with_query_string = scope.get("root_path", "") + scope["path"]
+ path_with_query_string = urllib.parse.quote(
+ scope.get("root_path", "") + scope["path"]
+ )
if scope["query_string"]:
path_with_query_string = "{}?{}".format(
path_with_query_string, scope["query_string"].decode("ascii")
| {"golden_diff": "diff --git a/uvicorn/logging.py b/uvicorn/logging.py\n--- a/uvicorn/logging.py\n+++ b/uvicorn/logging.py\n@@ -1,6 +1,7 @@\n import http\n import logging\n import sys\n+import urllib\n from copy import copy\n \n import click\n@@ -77,14 +78,14 @@\n return \"%s:%d\" % (client[0], client[1])\n \n def get_path(self, scope):\n- return scope.get(\"root_path\", \"\") + scope[\"path\"]\n+ return urllib.parse.quote(scope.get(\"root_path\", \"\") + scope[\"path\"])\n \n def get_full_path(self, scope):\n path = scope.get(\"root_path\", \"\") + scope[\"path\"]\n query_string = scope.get(\"query_string\", b\"\").decode(\"ascii\")\n if query_string:\n- return path + \"?\" + query_string\n- return path\n+ return urllib.parse.quote(path) + \"?\" + query_string\n+ return urllib.parse.quote(path)\n \n def get_status_code(self, record):\n status_code = record.__dict__[\"status_code\"]\ndiff --git a/uvicorn/protocols/utils.py b/uvicorn/protocols/utils.py\n--- a/uvicorn/protocols/utils.py\n+++ b/uvicorn/protocols/utils.py\n@@ -1,4 +1,5 @@\n import socket\n+import urllib\n \n \n def get_remote_addr(transport):\n@@ -49,7 +50,9 @@\n \n \n def get_path_with_query_string(scope):\n- path_with_query_string = scope.get(\"root_path\", \"\") + scope[\"path\"]\n+ path_with_query_string = urllib.parse.quote(\n+ scope.get(\"root_path\", \"\") + scope[\"path\"]\n+ )\n if scope[\"query_string\"]:\n path_with_query_string = \"{}?{}\".format(\n path_with_query_string, scope[\"query_string\"].decode(\"ascii\")\n", "issue": "CVE-2020-7694 ANSI escape sequence injection\nLooks like there is a [critical](https://vuldb.com/?id.159010) [security issue in \"all versions\" of uvicorn](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-7694).\r\n\r\nI presume there is a typo in the original CVE description, so \"ASNI\" should be \"ANSI\".\n", "before_files": [{"content": "import http\nimport logging\nimport sys\nfrom copy import copy\n\nimport click\n\nTRACE_LOG_LEVEL = 5\n\n\nclass ColourizedFormatter(logging.Formatter):\n \"\"\"\n A custom log formatter class that:\n\n * Outputs the LOG_LEVEL with an appropriate color.\n * If a log call includes an `extras={\"color_message\": ...}` it will be used\n for formatting the output, instead of the plain text message.\n \"\"\"\n\n level_name_colors = {\n TRACE_LOG_LEVEL: lambda level_name: click.style(str(level_name), fg=\"blue\"),\n logging.DEBUG: lambda level_name: click.style(str(level_name), fg=\"cyan\"),\n logging.INFO: lambda level_name: click.style(str(level_name), fg=\"green\"),\n logging.WARNING: lambda level_name: click.style(str(level_name), fg=\"yellow\"),\n logging.ERROR: lambda level_name: click.style(str(level_name), fg=\"red\"),\n logging.CRITICAL: lambda level_name: click.style(\n str(level_name), fg=\"bright_red\"\n ),\n }\n\n def __init__(self, fmt=None, datefmt=None, style=\"%\", use_colors=None):\n if use_colors in (True, False):\n self.use_colors = use_colors\n else:\n self.use_colors = sys.stdout.isatty()\n super().__init__(fmt=fmt, datefmt=datefmt, style=style)\n\n def color_level_name(self, level_name, level_no):\n default = lambda level_name: str(level_name)\n func = self.level_name_colors.get(level_no, default)\n return func(level_name)\n\n def should_use_colors(self):\n return True\n\n def formatMessage(self, record):\n recordcopy = copy(record)\n levelname = recordcopy.levelname\n seperator = \" \" * (8 - len(recordcopy.levelname))\n if self.use_colors:\n levelname = self.color_level_name(levelname, recordcopy.levelno)\n if 
\"color_message\" in recordcopy.__dict__:\n recordcopy.msg = recordcopy.__dict__[\"color_message\"]\n recordcopy.__dict__[\"message\"] = recordcopy.getMessage()\n recordcopy.__dict__[\"levelprefix\"] = levelname + \":\" + seperator\n return super().formatMessage(recordcopy)\n\n\nclass DefaultFormatter(ColourizedFormatter):\n def should_use_colors(self):\n return sys.stderr.isatty()\n\n\nclass AccessFormatter(ColourizedFormatter):\n status_code_colours = {\n 1: lambda code: click.style(str(code), fg=\"bright_white\"),\n 2: lambda code: click.style(str(code), fg=\"green\"),\n 3: lambda code: click.style(str(code), fg=\"yellow\"),\n 4: lambda code: click.style(str(code), fg=\"red\"),\n 5: lambda code: click.style(str(code), fg=\"bright_red\"),\n }\n\n def get_client_addr(self, scope):\n client = scope.get(\"client\")\n if not client:\n return \"\"\n return \"%s:%d\" % (client[0], client[1])\n\n def get_path(self, scope):\n return scope.get(\"root_path\", \"\") + scope[\"path\"]\n\n def get_full_path(self, scope):\n path = scope.get(\"root_path\", \"\") + scope[\"path\"]\n query_string = scope.get(\"query_string\", b\"\").decode(\"ascii\")\n if query_string:\n return path + \"?\" + query_string\n return path\n\n def get_status_code(self, record):\n status_code = record.__dict__[\"status_code\"]\n try:\n status_phrase = http.HTTPStatus(status_code).phrase\n except ValueError:\n status_phrase = \"\"\n status_and_phrase = \"%s %s\" % (status_code, status_phrase)\n\n if self.use_colors:\n default = lambda code: status_and_phrase\n func = self.status_code_colours.get(status_code // 100, default)\n return func(status_and_phrase)\n return status_and_phrase\n\n def formatMessage(self, record):\n recordcopy = copy(record)\n scope = recordcopy.__dict__[\"scope\"]\n method = scope[\"method\"]\n path = self.get_path(scope)\n full_path = self.get_full_path(scope)\n client_addr = self.get_client_addr(scope)\n status_code = self.get_status_code(recordcopy)\n http_version = scope[\"http_version\"]\n request_line = \"%s %s HTTP/%s\" % (method, full_path, http_version)\n if self.use_colors:\n request_line = click.style(request_line, bold=True)\n recordcopy.__dict__.update(\n {\n \"method\": method,\n \"path\": path,\n \"full_path\": full_path,\n \"client_addr\": client_addr,\n \"request_line\": request_line,\n \"status_code\": status_code,\n \"http_version\": http_version,\n }\n )\n return super().formatMessage(recordcopy)\n", "path": "uvicorn/logging.py"}, {"content": "import socket\n\n\ndef get_remote_addr(transport):\n socket_info = transport.get_extra_info(\"socket\")\n if socket_info is not None:\n try:\n info = socket_info.getpeername()\n except OSError:\n # This case appears to inconsistently occur with uvloop\n # bound to a unix domain socket.\n family = None\n info = None\n else:\n family = socket_info.family\n\n if family in (socket.AF_INET, socket.AF_INET6):\n return (str(info[0]), int(info[1]))\n return None\n info = transport.get_extra_info(\"peername\")\n if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:\n return (str(info[0]), int(info[1]))\n return None\n\n\ndef get_local_addr(transport):\n socket_info = transport.get_extra_info(\"socket\")\n if socket_info is not None:\n info = socket_info.getsockname()\n family = socket_info.family\n if family in (socket.AF_INET, socket.AF_INET6):\n return (str(info[0]), int(info[1]))\n return None\n info = transport.get_extra_info(\"sockname\")\n if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:\n return 
(str(info[0]), int(info[1]))\n return None\n\n\ndef is_ssl(transport):\n return bool(transport.get_extra_info(\"sslcontext\"))\n\n\ndef get_client_addr(scope):\n client = scope.get(\"client\")\n if not client:\n return \"\"\n return \"%s:%d\" % client\n\n\ndef get_path_with_query_string(scope):\n path_with_query_string = scope.get(\"root_path\", \"\") + scope[\"path\"]\n if scope[\"query_string\"]:\n path_with_query_string = \"{}?{}\".format(\n path_with_query_string, scope[\"query_string\"].decode(\"ascii\")\n )\n return path_with_query_string\n", "path": "uvicorn/protocols/utils.py"}]} | 2,492 | 407 |
gh_patches_debug_19099 | rasdani/github-patches | git_diff | wagtail__wagtail-6320 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`include_block block with context` does not include variables defined in templates
Wagtail's `include_block` tag is meant to match Jinja2's `include` tag as closely as possible
- so one would expect that when you include the context, the local template variables are also included. However, the following code does not make `foo` available to the template that `block` renders:
```
{% with foo="bar" %}
{% include_block block with context %}
{% endwith %}
```
Looks like this is because `jinja2.nodes.ContextReference()` does not currently include template-defined variables (which we use in [wagtail/core/jinja2tags.py](wagtail/core/jinja2tags.py#L39) to get the context). There is an issue relating [to this on Jinja2's repo](https://github.com/pallets/jinja/issues/860).
We could provide our own Jinja2 environment with a patched `CodeGenerator` fairly easily to include template variables (the method described in the issue above does work and django is nice enough to expose a way to load custom environments).
I'd be keen to submit a PR for this - but I'm unsure if we want to ship a patched `CodeGenerator` class? If we were to do so we'd probably want to pin the Jinja2 version in Wagtail a little more tightly to avoid breakages.
</issue>
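
For reference, the "custom environment" route mentioned above hooks in through Django's Jinja2 template backend, which accepts a dotted path to an environment factory. The sketch below uses placeholder module paths and builds a stock environment where a patched generator could otherwise be supplied:

```python
# myproject/jinja2.py (module path is an example)
from jinja2 import Environment

def environment(**options):
    # A patched CodeGenerator could be wired in here via a custom Environment
    # subclass; this sketch just returns a stock environment.
    return Environment(**options)

# settings.py
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.jinja2.Jinja2",
        "DIRS": [],
        "APP_DIRS": True,
        "OPTIONS": {"environment": "myproject.jinja2.environment"},
    },
]
```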
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 from wagtail import __version__
4 from wagtail.utils.setup import assets, check_bdist_egg, sdist
5
6
7 try:
8 from setuptools import find_packages, setup
9 except ImportError:
10 from distutils.core import setup
11
12
13 # Hack to prevent "TypeError: 'NoneType' object is not callable" error
14 # in multiprocessing/util.py _exit_function when setup.py exits
15 # (see http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html)
16 try:
17 import multiprocessing # noqa
18 except ImportError:
19 pass
20
21
22 install_requires = [
23 "Django>=2.2,<3.2",
24 "django-modelcluster>=5.1,<6.0",
25 "django-taggit>=1.0,<2.0",
26 "django-treebeard>=4.2.0,<5.0",
27 "djangorestframework>=3.11.1,<4.0",
28 "django-filter>=2.2,<3.0",
29 "draftjs_exporter>=2.1.5,<3.0",
30 "Pillow>=4.0.0,<9.0.0",
31 "beautifulsoup4>=4.8,<4.9",
32 "html5lib>=0.999,<2",
33 "Willow>=1.4,<1.5",
34 "requests>=2.11.1,<3.0",
35 "l18n>=2018.5",
36 "xlsxwriter>=1.2.8,<2.0",
37 "tablib[xls,xlsx]>=0.14.0",
38 "anyascii>=0.1.5",
39 ]
40
41 # Testing dependencies
42 testing_extras = [
43 # Required for running the tests
44 'python-dateutil>=2.2',
45 'pytz>=2014.7',
46 'elasticsearch>=5.0,<6.0',
47 'Jinja2>=2.8,<3.0',
48 'boto3>=1.16,<1.17',
49 'freezegun>=0.3.8',
50 'openpyxl>=2.6.4',
51 'Unidecode>=0.04.14,<2.0',
52
53 # For coverage and PEP8 linting
54 'coverage>=3.7.0',
55 'flake8>=3.6.0',
56 'isort==5.6.4', # leave this pinned - it tends to change rules between patch releases
57 'flake8-blind-except==0.1.1',
58 'flake8-print==2.0.2',
59 'doc8==0.8.1',
60
61 # For templates linting
62 'jinjalint>=0.5',
63
64 # Pipenv hack to fix broken dependency causing CircleCI failures
65 'docutils==0.15',
66
67 # django-taggit 1.3.0 made changes to verbose_name which affect migrations;
68 # the test suite migrations correspond to >=1.3.0
69 'django-taggit>=1.3.0,<2.0',
70 ]
71
72 # Documentation dependencies
73 documentation_extras = [
74 'pyenchant>=3.1.1,<4',
75 'sphinxcontrib-spelling>=5.4.0,<6',
76 'Sphinx>=1.5.2',
77 'sphinx-autobuild>=0.6.0',
78 'sphinx_rtd_theme>=0.1.9',
79 ]
80
81 setup(
82 name='wagtail',
83 version=__version__,
84 description='A Django content management system.',
85 author='Wagtail core team + contributors',
86 author_email='[email protected]', # For support queries, please see https://docs.wagtail.io/en/stable/support.html
87 url='https://wagtail.io/',
88 packages=find_packages(),
89 include_package_data=True,
90 license='BSD',
91 long_description="Wagtail is an open source content management \
92 system built on Django, with a strong community and commercial support. \
93 It’s focused on user experience, and offers precise control for \
94 designers and developers.\n\n\
95 For more details, see https://wagtail.io, https://docs.wagtail.io and \
96 https://github.com/wagtail/wagtail/.",
97 classifiers=[
98 'Development Status :: 5 - Production/Stable',
99 'Environment :: Web Environment',
100 'Intended Audience :: Developers',
101 'License :: OSI Approved :: BSD License',
102 'Operating System :: OS Independent',
103 'Programming Language :: Python',
104 'Programming Language :: Python :: 3',
105 'Programming Language :: Python :: 3.6',
106 'Programming Language :: Python :: 3.7',
107 'Programming Language :: Python :: 3.8',
108 'Programming Language :: Python :: 3.9',
109 'Framework :: Django',
110 'Framework :: Django :: 2.2',
111 'Framework :: Django :: 3.0',
112 'Framework :: Django :: 3.1',
113 'Framework :: Wagtail',
114 'Topic :: Internet :: WWW/HTTP :: Site Management',
115 ],
116 python_requires='>=3.6',
117 install_requires=install_requires,
118 extras_require={
119 'testing': testing_extras,
120 'docs': documentation_extras
121 },
122 entry_points="""
123 [console_scripts]
124 wagtail=wagtail.bin.wagtail:main
125 """,
126 zip_safe=False,
127 cmdclass={
128 'sdist': sdist,
129 'bdist_egg': check_bdist_egg,
130 'assets': assets,
131 },
132 )
133
[end of setup.py]
[start of wagtail/core/jinja2tags.py]
1 import jinja2
2 import jinja2.nodes
3
4 from jinja2.ext import Extension
5
6 from .templatetags.wagtailcore_tags import pageurl, richtext, slugurl, wagtail_version
7
8
9 class WagtailCoreExtension(Extension):
10 tags = {'include_block'}
11
12 def __init__(self, environment):
13 super().__init__(environment)
14
15 self.environment.globals.update({
16 'pageurl': jinja2.contextfunction(pageurl),
17 'slugurl': jinja2.contextfunction(slugurl),
18 'wagtail_version': wagtail_version,
19 })
20 self.environment.filters.update({
21 'richtext': richtext,
22 })
23
24 def parse(self, parser):
25 parse_method = getattr(self, 'parse_' + parser.stream.current.value)
26
27 return parse_method(parser)
28
29 def parse_include_block(self, parser):
30 lineno = next(parser.stream).lineno
31
32 args = [parser.parse_expression()]
33
34 with_context = True
35 if parser.stream.current.test_any('name:with', 'name:without') and parser.stream.look().test('name:context'):
36 with_context = next(parser.stream).value == 'with'
37 parser.stream.skip()
38
39 if with_context:
40 args.append(jinja2.nodes.ContextReference())
41 else:
42 # Actually we can just skip else branch because context arg default to None
43 args.append(jinja2.nodes.Const(None))
44
45 node = self.call_method('_include_block', args, lineno=lineno)
46 return jinja2.nodes.Output([node], lineno=lineno)
47
48 def _include_block(self, value, context=None):
49 if hasattr(value, 'render_as_block'):
50 if context:
51 new_context = context.get_all()
52 else:
53 new_context = {}
54
55 return jinja2.Markup(value.render_as_block(context=new_context))
56
57 return jinja2.Markup(value)
58
59
60 # Nicer import names
61 core = WagtailCoreExtension
62
[end of wagtail/core/jinja2tags.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -44,7 +44,7 @@
'python-dateutil>=2.2',
'pytz>=2014.7',
'elasticsearch>=5.0,<6.0',
- 'Jinja2>=2.8,<3.0',
+ 'Jinja2>=2.11,<3.0',
'boto3>=1.16,<1.17',
'freezegun>=0.3.8',
'openpyxl>=2.6.4',
diff --git a/wagtail/core/jinja2tags.py b/wagtail/core/jinja2tags.py
--- a/wagtail/core/jinja2tags.py
+++ b/wagtail/core/jinja2tags.py
@@ -37,7 +37,11 @@
parser.stream.skip()
if with_context:
- args.append(jinja2.nodes.ContextReference())
+ if hasattr(jinja2.nodes, 'DerivedContextReference'):
+ # DerivedContextReference includes local variables. Introduced in Jinja 2.11
+ args.append(jinja2.nodes.DerivedContextReference())
+ else:
+ args.append(jinja2.nodes.ContextReference())
else:
# Actually we can just skip else branch because context arg default to None
args.append(jinja2.nodes.Const(None))
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -44,7 +44,7 @@\n 'python-dateutil>=2.2',\n 'pytz>=2014.7',\n 'elasticsearch>=5.0,<6.0',\n- 'Jinja2>=2.8,<3.0',\n+ 'Jinja2>=2.11,<3.0',\n 'boto3>=1.16,<1.17',\n 'freezegun>=0.3.8',\n 'openpyxl>=2.6.4',\ndiff --git a/wagtail/core/jinja2tags.py b/wagtail/core/jinja2tags.py\n--- a/wagtail/core/jinja2tags.py\n+++ b/wagtail/core/jinja2tags.py\n@@ -37,7 +37,11 @@\n parser.stream.skip()\n \n if with_context:\n- args.append(jinja2.nodes.ContextReference())\n+ if hasattr(jinja2.nodes, 'DerivedContextReference'):\n+ # DerivedContextReference includes local variables. Introduced in Jinja 2.11\n+ args.append(jinja2.nodes.DerivedContextReference())\n+ else:\n+ args.append(jinja2.nodes.ContextReference())\n else:\n # Actually we can just skip else branch because context arg default to None\n args.append(jinja2.nodes.Const(None))\n", "issue": "`include_block block with context` does not include variables defined in templates\nWagtails `include_block` tag is meant to match as closely as possible Jinja2's `include` tag \r\n- so one would expect that when you include the context the local template variables are also includes. However the following code does not make `foo` available to the template that `block` renders:\r\n\r\n```\r\n{% with foo=\"bar\" %}\r\n {% include_block block with context %}\r\n{% endwith %}\r\n```\r\n\r\nLooks like this is because `jinja2.nodes.ContextReference()` does not currently include template defined variables (which we use in [wagtail/core/jinja2tags.py](wagtail/core/jinja2tags.py#L39) to get the context). There is an issue relating [to this on Jinja2's repo](https://github.com/pallets/jinja/issues/860).\r\n\r\nWe could provide our own Jinja2 environment with a patched `CodeGenerator` fairly easily to include template variables (the method described in the issue above does work and django is nice enough to expose a way to load custom environments).\r\n\r\nI'd be keen to submit a PR for this - but I'm unsure if we want to ship a patched `CodeGenerator` class? 
If we were to do so we'd probably want to pin the Jinja2 version in Wagtail a little more tightly to avoid breakages.\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom wagtail import __version__\nfrom wagtail.utils.setup import assets, check_bdist_egg, sdist\n\n\ntry:\n from setuptools import find_packages, setup\nexcept ImportError:\n from distutils.core import setup\n\n\n# Hack to prevent \"TypeError: 'NoneType' object is not callable\" error\n# in multiprocessing/util.py _exit_function when setup.py exits\n# (see http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html)\ntry:\n import multiprocessing # noqa\nexcept ImportError:\n pass\n\n\ninstall_requires = [\n \"Django>=2.2,<3.2\",\n \"django-modelcluster>=5.1,<6.0\",\n \"django-taggit>=1.0,<2.0\",\n \"django-treebeard>=4.2.0,<5.0\",\n \"djangorestframework>=3.11.1,<4.0\",\n \"django-filter>=2.2,<3.0\",\n \"draftjs_exporter>=2.1.5,<3.0\",\n \"Pillow>=4.0.0,<9.0.0\",\n \"beautifulsoup4>=4.8,<4.9\",\n \"html5lib>=0.999,<2\",\n \"Willow>=1.4,<1.5\",\n \"requests>=2.11.1,<3.0\",\n \"l18n>=2018.5\",\n \"xlsxwriter>=1.2.8,<2.0\",\n \"tablib[xls,xlsx]>=0.14.0\",\n \"anyascii>=0.1.5\",\n]\n\n# Testing dependencies\ntesting_extras = [\n # Required for running the tests\n 'python-dateutil>=2.2',\n 'pytz>=2014.7',\n 'elasticsearch>=5.0,<6.0',\n 'Jinja2>=2.8,<3.0',\n 'boto3>=1.16,<1.17',\n 'freezegun>=0.3.8',\n 'openpyxl>=2.6.4',\n 'Unidecode>=0.04.14,<2.0',\n\n # For coverage and PEP8 linting\n 'coverage>=3.7.0',\n 'flake8>=3.6.0',\n 'isort==5.6.4', # leave this pinned - it tends to change rules between patch releases\n 'flake8-blind-except==0.1.1',\n 'flake8-print==2.0.2',\n 'doc8==0.8.1',\n\n # For templates linting\n 'jinjalint>=0.5',\n\n # Pipenv hack to fix broken dependency causing CircleCI failures\n 'docutils==0.15',\n\n # django-taggit 1.3.0 made changes to verbose_name which affect migrations;\n # the test suite migrations correspond to >=1.3.0\n 'django-taggit>=1.3.0,<2.0',\n]\n\n# Documentation dependencies\ndocumentation_extras = [\n 'pyenchant>=3.1.1,<4',\n 'sphinxcontrib-spelling>=5.4.0,<6',\n 'Sphinx>=1.5.2',\n 'sphinx-autobuild>=0.6.0',\n 'sphinx_rtd_theme>=0.1.9',\n]\n\nsetup(\n name='wagtail',\n version=__version__,\n description='A Django content management system.',\n author='Wagtail core team + contributors',\n author_email='[email protected]', # For support queries, please see https://docs.wagtail.io/en/stable/support.html\n url='https://wagtail.io/',\n packages=find_packages(),\n include_package_data=True,\n license='BSD',\n long_description=\"Wagtail is an open source content management \\\nsystem built on Django, with a strong community and commercial support. 
\\\nIt\u2019s focused on user experience, and offers precise control for \\\ndesigners and developers.\\n\\n\\\nFor more details, see https://wagtail.io, https://docs.wagtail.io and \\\nhttps://github.com/wagtail/wagtail/.\",\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Framework :: Django',\n 'Framework :: Django :: 2.2',\n 'Framework :: Django :: 3.0',\n 'Framework :: Django :: 3.1',\n 'Framework :: Wagtail',\n 'Topic :: Internet :: WWW/HTTP :: Site Management',\n ],\n python_requires='>=3.6',\n install_requires=install_requires,\n extras_require={\n 'testing': testing_extras,\n 'docs': documentation_extras\n },\n entry_points=\"\"\"\n [console_scripts]\n wagtail=wagtail.bin.wagtail:main\n \"\"\",\n zip_safe=False,\n cmdclass={\n 'sdist': sdist,\n 'bdist_egg': check_bdist_egg,\n 'assets': assets,\n },\n)\n", "path": "setup.py"}, {"content": "import jinja2\nimport jinja2.nodes\n\nfrom jinja2.ext import Extension\n\nfrom .templatetags.wagtailcore_tags import pageurl, richtext, slugurl, wagtail_version\n\n\nclass WagtailCoreExtension(Extension):\n tags = {'include_block'}\n\n def __init__(self, environment):\n super().__init__(environment)\n\n self.environment.globals.update({\n 'pageurl': jinja2.contextfunction(pageurl),\n 'slugurl': jinja2.contextfunction(slugurl),\n 'wagtail_version': wagtail_version,\n })\n self.environment.filters.update({\n 'richtext': richtext,\n })\n\n def parse(self, parser):\n parse_method = getattr(self, 'parse_' + parser.stream.current.value)\n\n return parse_method(parser)\n\n def parse_include_block(self, parser):\n lineno = next(parser.stream).lineno\n\n args = [parser.parse_expression()]\n\n with_context = True\n if parser.stream.current.test_any('name:with', 'name:without') and parser.stream.look().test('name:context'):\n with_context = next(parser.stream).value == 'with'\n parser.stream.skip()\n\n if with_context:\n args.append(jinja2.nodes.ContextReference())\n else:\n # Actually we can just skip else branch because context arg default to None\n args.append(jinja2.nodes.Const(None))\n\n node = self.call_method('_include_block', args, lineno=lineno)\n return jinja2.nodes.Output([node], lineno=lineno)\n\n def _include_block(self, value, context=None):\n if hasattr(value, 'render_as_block'):\n if context:\n new_context = context.get_all()\n else:\n new_context = {}\n\n return jinja2.Markup(value.render_as_block(context=new_context))\n\n return jinja2.Markup(value)\n\n\n# Nicer import names\ncore = WagtailCoreExtension\n", "path": "wagtail/core/jinja2tags.py"}]} | 2,895 | 315 |
gh_patches_debug_36955 | rasdani/github-patches | git_diff | deis__deis-4071 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Dynamic typing for config response
```
POST /v1/apps/<app id>/config/
{
"cpu" {
"web": 1024
}
}
Response:
{
...
"cpu" {
"web": 1024
}
}
POST /v1/apps/<app id>/config/
{
"cpu" {
"web": "1024"
}
}
Response:
{
...
"cpu" {
"web": "1024"
}
}
```
I think that dynamic parsing for the request is fine, but having a dynamic type for a response makes it very difficult to parse.
</issue>
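For context on the fix direction: a serializer field that normalises each value to one type on the way in keeps the echoed response shape stable no matter how the client typed the request. The sketch below is purely illustrative — the class name and structure are assumptions, not the project's actual code; it assumes Django REST framework (which the serializer module below already imports) and that `data` arrives as a dict, with validation and error handling omitted:

```python
from rest_framework import serializers


class IntCoercingJSONField(serializers.Field):
    """Illustrative only: coerce values to int on input so the API always
    responds with {"web": 1024}, never {"web": "1024"}."""

    def to_internal_value(self, data):
        # None is left untouched because it is used to unset a value;
        # everything else is normalised to an integer.
        return {k: (v if v is None else int(v)) for k, v in data.items()}

    def to_representation(self, obj):
        # Values were normalised on input, so they can be returned as-is.
        return obj
```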
<code>
[start of controller/api/serializers.py]
1 """
2 Classes to serialize the RESTful representation of Deis API models.
3 """
4
5 from __future__ import unicode_literals
6
7 import json
8 import re
9
10 from django.conf import settings
11 from django.contrib.auth.models import User
12 from django.utils import timezone
13 from rest_framework import serializers
14 from rest_framework.validators import UniqueTogetherValidator
15
16 from api import models
17
18
19 PROCTYPE_MATCH = re.compile(r'^(?P<type>[a-z]+)')
20 MEMLIMIT_MATCH = re.compile(r'^(?P<mem>[0-9]+(MB|KB|GB|[BKMG]))$', re.IGNORECASE)
21 CPUSHARE_MATCH = re.compile(r'^(?P<cpu>[0-9]+)$')
22 TAGKEY_MATCH = re.compile(r'^[a-z]+$')
23 TAGVAL_MATCH = re.compile(r'^\w+$')
24
25
26 class JSONFieldSerializer(serializers.Field):
27 def to_representation(self, obj):
28 return obj
29
30 def to_internal_value(self, data):
31 try:
32 val = json.loads(data)
33 except TypeError:
34 val = data
35 return val
36
37
38 class ModelSerializer(serializers.ModelSerializer):
39
40 uuid = serializers.ReadOnlyField()
41
42 def get_validators(self):
43 """
44 Hack to remove DRF's UniqueTogetherValidator when it concerns the UUID.
45
46 See https://github.com/deis/deis/pull/2898#discussion_r23105147
47 """
48 validators = super(ModelSerializer, self).get_validators()
49 for v in validators:
50 if isinstance(v, UniqueTogetherValidator) and 'uuid' in v.fields:
51 validators.remove(v)
52 return validators
53
54
55 class UserSerializer(serializers.ModelSerializer):
56 class Meta:
57 model = User
58 fields = ['email', 'username', 'password', 'first_name', 'last_name', 'is_superuser',
59 'is_staff', 'groups', 'user_permissions', 'last_login', 'date_joined',
60 'is_active']
61 read_only_fields = ['is_superuser', 'is_staff', 'groups',
62 'user_permissions', 'last_login', 'date_joined', 'is_active']
63 extra_kwargs = {'password': {'write_only': True}}
64
65 def create(self, validated_data):
66 now = timezone.now()
67 user = User(
68 email=validated_data.get('email'),
69 username=validated_data.get('username'),
70 last_login=now,
71 date_joined=now,
72 is_active=True
73 )
74 if validated_data.get('first_name'):
75 user.first_name = validated_data['first_name']
76 if validated_data.get('last_name'):
77 user.last_name = validated_data['last_name']
78 user.set_password(validated_data['password'])
79 # Make the first signup an admin / superuser
80 if not User.objects.filter(is_superuser=True).exists():
81 user.is_superuser = user.is_staff = True
82 user.save()
83 return user
84
85
86 class AdminUserSerializer(serializers.ModelSerializer):
87 """Serialize admin status for a User model."""
88
89 class Meta:
90 model = User
91 fields = ['username', 'is_superuser']
92 read_only_fields = ['username']
93
94
95 class AppSerializer(ModelSerializer):
96 """Serialize a :class:`~api.models.App` model."""
97
98 owner = serializers.ReadOnlyField(source='owner.username')
99 structure = JSONFieldSerializer(required=False)
100 created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
101 updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
102
103 class Meta:
104 """Metadata options for a :class:`AppSerializer`."""
105 model = models.App
106 fields = ['uuid', 'id', 'owner', 'url', 'structure', 'created', 'updated']
107 read_only_fields = ['uuid']
108
109
110 class BuildSerializer(ModelSerializer):
111 """Serialize a :class:`~api.models.Build` model."""
112
113 app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())
114 owner = serializers.ReadOnlyField(source='owner.username')
115 procfile = JSONFieldSerializer(required=False)
116 created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
117 updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
118
119 class Meta:
120 """Metadata options for a :class:`BuildSerializer`."""
121 model = models.Build
122 fields = ['owner', 'app', 'image', 'sha', 'procfile', 'dockerfile', 'created',
123 'updated', 'uuid']
124 read_only_fields = ['uuid']
125
126
127 class ConfigSerializer(ModelSerializer):
128 """Serialize a :class:`~api.models.Config` model."""
129
130 app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())
131 owner = serializers.ReadOnlyField(source='owner.username')
132 values = JSONFieldSerializer(required=False)
133 memory = JSONFieldSerializer(required=False)
134 cpu = JSONFieldSerializer(required=False)
135 tags = JSONFieldSerializer(required=False)
136 created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
137 updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
138
139 class Meta:
140 """Metadata options for a :class:`ConfigSerializer`."""
141 model = models.Config
142
143 def validate_memory(self, value):
144 for k, v in value.viewitems():
145 if v is None: # use NoneType to unset a value
146 continue
147 if not re.match(PROCTYPE_MATCH, k):
148 raise serializers.ValidationError("Process types can only contain [a-z]")
149 if not re.match(MEMLIMIT_MATCH, str(v)):
150 raise serializers.ValidationError(
151 "Limit format: <number><unit>, where unit = B, K, M or G")
152 return value
153
154 def validate_cpu(self, value):
155 for k, v in value.viewitems():
156 if v is None: # use NoneType to unset a value
157 continue
158 if not re.match(PROCTYPE_MATCH, k):
159 raise serializers.ValidationError("Process types can only contain [a-z]")
160 shares = re.match(CPUSHARE_MATCH, str(v))
161 if not shares:
162 raise serializers.ValidationError("CPU shares must be an integer")
163 for v in shares.groupdict().viewvalues():
164 try:
165 i = int(v)
166 except ValueError:
167 raise serializers.ValidationError("CPU shares must be an integer")
168 if i > 1024 or i < 0:
169 raise serializers.ValidationError("CPU shares must be between 0 and 1024")
170 return value
171
172 def validate_tags(self, value):
173 for k, v in value.viewitems():
174 if v is None: # use NoneType to unset a value
175 continue
176 if not re.match(TAGKEY_MATCH, k):
177 raise serializers.ValidationError("Tag keys can only contain [a-z]")
178 if not re.match(TAGVAL_MATCH, str(v)):
179 raise serializers.ValidationError("Invalid tag value")
180 return value
181
182
183 class ReleaseSerializer(ModelSerializer):
184 """Serialize a :class:`~api.models.Release` model."""
185
186 app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())
187 owner = serializers.ReadOnlyField(source='owner.username')
188 created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
189 updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
190
191 class Meta:
192 """Metadata options for a :class:`ReleaseSerializer`."""
193 model = models.Release
194
195
196 class ContainerSerializer(ModelSerializer):
197 """Serialize a :class:`~api.models.Container` model."""
198
199 app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())
200 owner = serializers.ReadOnlyField(source='owner.username')
201 created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
202 updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
203 release = serializers.SerializerMethodField()
204
205 class Meta:
206 """Metadata options for a :class:`ContainerSerializer`."""
207 model = models.Container
208 fields = ['owner', 'app', 'release', 'type', 'num', 'state', 'created', 'updated', 'uuid']
209
210 def get_release(self, obj):
211 return "v{}".format(obj.release.version)
212
213
214 class KeySerializer(ModelSerializer):
215 """Serialize a :class:`~api.models.Key` model."""
216
217 owner = serializers.ReadOnlyField(source='owner.username')
218 fingerprint = serializers.CharField(read_only=True)
219 created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
220 updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
221
222 class Meta:
223 """Metadata options for a KeySerializer."""
224 model = models.Key
225
226
227 class DomainSerializer(ModelSerializer):
228 """Serialize a :class:`~api.models.Domain` model."""
229
230 app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())
231 owner = serializers.ReadOnlyField(source='owner.username')
232 created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
233 updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
234
235 class Meta:
236 """Metadata options for a :class:`DomainSerializer`."""
237 model = models.Domain
238 fields = ['uuid', 'owner', 'created', 'updated', 'app', 'domain']
239
240 def validate_domain(self, value):
241 """
242 Check that the hostname is valid
243 """
244 if len(value) > 255:
245 raise serializers.ValidationError('Hostname must be 255 characters or less.')
246 if value[-1:] == ".":
247 value = value[:-1] # strip exactly one dot from the right, if present
248 labels = value.split('.')
249 if 'xip.io' in value:
250 return value
251 if labels[0] == '*':
252 raise serializers.ValidationError(
253 'Adding a wildcard subdomain is currently not supported.')
254 allowed = re.compile("^(?!-)[a-z0-9-]{1,63}(?<!-)$", re.IGNORECASE)
255 for label in labels:
256 match = allowed.match(label)
257 if not match or '--' in label or label.isdigit() or \
258 len(labels) == 1 and any(char.isdigit() for char in label):
259 raise serializers.ValidationError('Hostname does not look valid.')
260 if models.Domain.objects.filter(domain=value).exists():
261 raise serializers.ValidationError(
262 "The domain {} is already in use by another app".format(value))
263 return value
264
265
266 class CertificateSerializer(ModelSerializer):
267 """Serialize a :class:`~api.models.Cert` model."""
268
269 owner = serializers.ReadOnlyField(source='owner.username')
270 expires = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
271 created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
272 updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
273
274 class Meta:
275 """Metadata options for a DomainCertSerializer."""
276 model = models.Certificate
277 extra_kwargs = {'certificate': {'write_only': True},
278 'key': {'write_only': True},
279 'common_name': {'required': False}}
280 read_only_fields = ['expires', 'created', 'updated']
281
282
283 class PushSerializer(ModelSerializer):
284 """Serialize a :class:`~api.models.Push` model."""
285
286 app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())
287 owner = serializers.ReadOnlyField(source='owner.username')
288 created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
289 updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
290
291 class Meta:
292 """Metadata options for a :class:`PushSerializer`."""
293 model = models.Push
294 fields = ['uuid', 'owner', 'app', 'sha', 'fingerprint', 'receive_user', 'receive_repo',
295 'ssh_connection', 'ssh_original_command', 'created', 'updated']
296
[end of controller/api/serializers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/controller/api/serializers.py b/controller/api/serializers.py
--- a/controller/api/serializers.py
+++ b/controller/api/serializers.py
@@ -24,10 +24,16 @@
class JSONFieldSerializer(serializers.Field):
+ """
+ A Django REST framework serializer for JSON data.
+ """
+
def to_representation(self, obj):
+ """Serialize the field's JSON data, for read operations."""
return obj
def to_internal_value(self, data):
+ """Deserialize the field's JSON data, for write operations."""
try:
val = json.loads(data)
except TypeError:
@@ -35,6 +41,41 @@
return val
+class JSONIntFieldSerializer(JSONFieldSerializer):
+ """
+ A JSON serializer that coerces its data to integers.
+ """
+
+ def to_internal_value(self, data):
+ """Deserialize the field's JSON integer data."""
+ field = super(JSONIntFieldSerializer, self).to_internal_value(data)
+
+ for k, v in field.viewitems():
+ if v is not None: # NoneType is used to unset a value
+ try:
+ field[k] = int(v)
+ except ValueError:
+ field[k] = v
+ # Do nothing, the validator will catch this later
+ return field
+
+
+class JSONStringFieldSerializer(JSONFieldSerializer):
+ """
+ A JSON serializer that coerces its data to strings.
+ """
+
+ def to_internal_value(self, data):
+ """Deserialize the field's JSON string data."""
+ field = super(JSONStringFieldSerializer, self).to_internal_value(data)
+
+ for k, v in field.viewitems():
+ if v is not None: # NoneType is used to unset a value
+ field[k] = unicode(v)
+
+ return field
+
+
class ModelSerializer(serializers.ModelSerializer):
uuid = serializers.ReadOnlyField()
@@ -129,10 +170,10 @@
app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())
owner = serializers.ReadOnlyField(source='owner.username')
- values = JSONFieldSerializer(required=False)
- memory = JSONFieldSerializer(required=False)
- cpu = JSONFieldSerializer(required=False)
- tags = JSONFieldSerializer(required=False)
+ values = JSONStringFieldSerializer(required=False)
+ memory = JSONStringFieldSerializer(required=False)
+ cpu = JSONIntFieldSerializer(required=False)
+ tags = JSONStringFieldSerializer(required=False)
created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
| {"golden_diff": "diff --git a/controller/api/serializers.py b/controller/api/serializers.py\n--- a/controller/api/serializers.py\n+++ b/controller/api/serializers.py\n@@ -24,10 +24,16 @@\n \n \n class JSONFieldSerializer(serializers.Field):\n+ \"\"\"\n+ A Django REST framework serializer for JSON data.\n+ \"\"\"\n+\n def to_representation(self, obj):\n+ \"\"\"Serialize the field's JSON data, for read operations.\"\"\"\n return obj\n \n def to_internal_value(self, data):\n+ \"\"\"Deserialize the field's JSON data, for write operations.\"\"\"\n try:\n val = json.loads(data)\n except TypeError:\n@@ -35,6 +41,41 @@\n return val\n \n \n+class JSONIntFieldSerializer(JSONFieldSerializer):\n+ \"\"\"\n+ A JSON serializer that coerces its data to integers.\n+ \"\"\"\n+\n+ def to_internal_value(self, data):\n+ \"\"\"Deserialize the field's JSON integer data.\"\"\"\n+ field = super(JSONIntFieldSerializer, self).to_internal_value(data)\n+\n+ for k, v in field.viewitems():\n+ if v is not None: # NoneType is used to unset a value\n+ try:\n+ field[k] = int(v)\n+ except ValueError:\n+ field[k] = v\n+ # Do nothing, the validator will catch this later\n+ return field\n+\n+\n+class JSONStringFieldSerializer(JSONFieldSerializer):\n+ \"\"\"\n+ A JSON serializer that coerces its data to strings.\n+ \"\"\"\n+\n+ def to_internal_value(self, data):\n+ \"\"\"Deserialize the field's JSON string data.\"\"\"\n+ field = super(JSONStringFieldSerializer, self).to_internal_value(data)\n+\n+ for k, v in field.viewitems():\n+ if v is not None: # NoneType is used to unset a value\n+ field[k] = unicode(v)\n+\n+ return field\n+\n+\n class ModelSerializer(serializers.ModelSerializer):\n \n uuid = serializers.ReadOnlyField()\n@@ -129,10 +170,10 @@\n \n app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())\n owner = serializers.ReadOnlyField(source='owner.username')\n- values = JSONFieldSerializer(required=False)\n- memory = JSONFieldSerializer(required=False)\n- cpu = JSONFieldSerializer(required=False)\n- tags = JSONFieldSerializer(required=False)\n+ values = JSONStringFieldSerializer(required=False)\n+ memory = JSONStringFieldSerializer(required=False)\n+ cpu = JSONIntFieldSerializer(required=False)\n+ tags = JSONStringFieldSerializer(required=False)\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n", "issue": "Dynamic typing for config response \n```\nPOST /v1/apps/<app id>/config/\n\n{\n \"cpu\" {\n \"web\": 1024\n }\n}\n\nResponse:\n\n{\n ...\n \"cpu\" {\n \"web\": 1024\n }\n}\n\nPOST /v1/apps/<app id>/config/\n\n{\n \"cpu\" {\n \"web\": \"1024\"\n }\n}\n\nResponse:\n\n{\n ...\n \"cpu\" {\n \"web\": \"1024\"\n }\n}\n```\n\nI think that dynamic parsing for the request is fine, but having a dynamic type for a response make it very difficult to parse.\n\n", "before_files": [{"content": "\"\"\"\nClasses to serialize the RESTful representation of Deis API models.\n\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport json\nimport re\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.utils import timezone\nfrom rest_framework import serializers\nfrom rest_framework.validators import UniqueTogetherValidator\n\nfrom api import models\n\n\nPROCTYPE_MATCH = re.compile(r'^(?P<type>[a-z]+)')\nMEMLIMIT_MATCH = re.compile(r'^(?P<mem>[0-9]+(MB|KB|GB|[BKMG]))$', re.IGNORECASE)\nCPUSHARE_MATCH = 
re.compile(r'^(?P<cpu>[0-9]+)$')\nTAGKEY_MATCH = re.compile(r'^[a-z]+$')\nTAGVAL_MATCH = re.compile(r'^\\w+$')\n\n\nclass JSONFieldSerializer(serializers.Field):\n def to_representation(self, obj):\n return obj\n\n def to_internal_value(self, data):\n try:\n val = json.loads(data)\n except TypeError:\n val = data\n return val\n\n\nclass ModelSerializer(serializers.ModelSerializer):\n\n uuid = serializers.ReadOnlyField()\n\n def get_validators(self):\n \"\"\"\n Hack to remove DRF's UniqueTogetherValidator when it concerns the UUID.\n\n See https://github.com/deis/deis/pull/2898#discussion_r23105147\n \"\"\"\n validators = super(ModelSerializer, self).get_validators()\n for v in validators:\n if isinstance(v, UniqueTogetherValidator) and 'uuid' in v.fields:\n validators.remove(v)\n return validators\n\n\nclass UserSerializer(serializers.ModelSerializer):\n class Meta:\n model = User\n fields = ['email', 'username', 'password', 'first_name', 'last_name', 'is_superuser',\n 'is_staff', 'groups', 'user_permissions', 'last_login', 'date_joined',\n 'is_active']\n read_only_fields = ['is_superuser', 'is_staff', 'groups',\n 'user_permissions', 'last_login', 'date_joined', 'is_active']\n extra_kwargs = {'password': {'write_only': True}}\n\n def create(self, validated_data):\n now = timezone.now()\n user = User(\n email=validated_data.get('email'),\n username=validated_data.get('username'),\n last_login=now,\n date_joined=now,\n is_active=True\n )\n if validated_data.get('first_name'):\n user.first_name = validated_data['first_name']\n if validated_data.get('last_name'):\n user.last_name = validated_data['last_name']\n user.set_password(validated_data['password'])\n # Make the first signup an admin / superuser\n if not User.objects.filter(is_superuser=True).exists():\n user.is_superuser = user.is_staff = True\n user.save()\n return user\n\n\nclass AdminUserSerializer(serializers.ModelSerializer):\n \"\"\"Serialize admin status for a User model.\"\"\"\n\n class Meta:\n model = User\n fields = ['username', 'is_superuser']\n read_only_fields = ['username']\n\n\nclass AppSerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.App` model.\"\"\"\n\n owner = serializers.ReadOnlyField(source='owner.username')\n structure = JSONFieldSerializer(required=False)\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n\n class Meta:\n \"\"\"Metadata options for a :class:`AppSerializer`.\"\"\"\n model = models.App\n fields = ['uuid', 'id', 'owner', 'url', 'structure', 'created', 'updated']\n read_only_fields = ['uuid']\n\n\nclass BuildSerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.Build` model.\"\"\"\n\n app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())\n owner = serializers.ReadOnlyField(source='owner.username')\n procfile = JSONFieldSerializer(required=False)\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n\n class Meta:\n \"\"\"Metadata options for a :class:`BuildSerializer`.\"\"\"\n model = models.Build\n fields = ['owner', 'app', 'image', 'sha', 'procfile', 'dockerfile', 'created',\n 'updated', 'uuid']\n read_only_fields = ['uuid']\n\n\nclass ConfigSerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.Config` model.\"\"\"\n\n app = 
serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())\n owner = serializers.ReadOnlyField(source='owner.username')\n values = JSONFieldSerializer(required=False)\n memory = JSONFieldSerializer(required=False)\n cpu = JSONFieldSerializer(required=False)\n tags = JSONFieldSerializer(required=False)\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n\n class Meta:\n \"\"\"Metadata options for a :class:`ConfigSerializer`.\"\"\"\n model = models.Config\n\n def validate_memory(self, value):\n for k, v in value.viewitems():\n if v is None: # use NoneType to unset a value\n continue\n if not re.match(PROCTYPE_MATCH, k):\n raise serializers.ValidationError(\"Process types can only contain [a-z]\")\n if not re.match(MEMLIMIT_MATCH, str(v)):\n raise serializers.ValidationError(\n \"Limit format: <number><unit>, where unit = B, K, M or G\")\n return value\n\n def validate_cpu(self, value):\n for k, v in value.viewitems():\n if v is None: # use NoneType to unset a value\n continue\n if not re.match(PROCTYPE_MATCH, k):\n raise serializers.ValidationError(\"Process types can only contain [a-z]\")\n shares = re.match(CPUSHARE_MATCH, str(v))\n if not shares:\n raise serializers.ValidationError(\"CPU shares must be an integer\")\n for v in shares.groupdict().viewvalues():\n try:\n i = int(v)\n except ValueError:\n raise serializers.ValidationError(\"CPU shares must be an integer\")\n if i > 1024 or i < 0:\n raise serializers.ValidationError(\"CPU shares must be between 0 and 1024\")\n return value\n\n def validate_tags(self, value):\n for k, v in value.viewitems():\n if v is None: # use NoneType to unset a value\n continue\n if not re.match(TAGKEY_MATCH, k):\n raise serializers.ValidationError(\"Tag keys can only contain [a-z]\")\n if not re.match(TAGVAL_MATCH, str(v)):\n raise serializers.ValidationError(\"Invalid tag value\")\n return value\n\n\nclass ReleaseSerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.Release` model.\"\"\"\n\n app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())\n owner = serializers.ReadOnlyField(source='owner.username')\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n\n class Meta:\n \"\"\"Metadata options for a :class:`ReleaseSerializer`.\"\"\"\n model = models.Release\n\n\nclass ContainerSerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.Container` model.\"\"\"\n\n app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())\n owner = serializers.ReadOnlyField(source='owner.username')\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n release = serializers.SerializerMethodField()\n\n class Meta:\n \"\"\"Metadata options for a :class:`ContainerSerializer`.\"\"\"\n model = models.Container\n fields = ['owner', 'app', 'release', 'type', 'num', 'state', 'created', 'updated', 'uuid']\n\n def get_release(self, obj):\n return \"v{}\".format(obj.release.version)\n\n\nclass KeySerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.Key` model.\"\"\"\n\n owner = serializers.ReadOnlyField(source='owner.username')\n fingerprint = 
serializers.CharField(read_only=True)\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n\n class Meta:\n \"\"\"Metadata options for a KeySerializer.\"\"\"\n model = models.Key\n\n\nclass DomainSerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.Domain` model.\"\"\"\n\n app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())\n owner = serializers.ReadOnlyField(source='owner.username')\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n\n class Meta:\n \"\"\"Metadata options for a :class:`DomainSerializer`.\"\"\"\n model = models.Domain\n fields = ['uuid', 'owner', 'created', 'updated', 'app', 'domain']\n\n def validate_domain(self, value):\n \"\"\"\n Check that the hostname is valid\n \"\"\"\n if len(value) > 255:\n raise serializers.ValidationError('Hostname must be 255 characters or less.')\n if value[-1:] == \".\":\n value = value[:-1] # strip exactly one dot from the right, if present\n labels = value.split('.')\n if 'xip.io' in value:\n return value\n if labels[0] == '*':\n raise serializers.ValidationError(\n 'Adding a wildcard subdomain is currently not supported.')\n allowed = re.compile(\"^(?!-)[a-z0-9-]{1,63}(?<!-)$\", re.IGNORECASE)\n for label in labels:\n match = allowed.match(label)\n if not match or '--' in label or label.isdigit() or \\\n len(labels) == 1 and any(char.isdigit() for char in label):\n raise serializers.ValidationError('Hostname does not look valid.')\n if models.Domain.objects.filter(domain=value).exists():\n raise serializers.ValidationError(\n \"The domain {} is already in use by another app\".format(value))\n return value\n\n\nclass CertificateSerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.Cert` model.\"\"\"\n\n owner = serializers.ReadOnlyField(source='owner.username')\n expires = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n\n class Meta:\n \"\"\"Metadata options for a DomainCertSerializer.\"\"\"\n model = models.Certificate\n extra_kwargs = {'certificate': {'write_only': True},\n 'key': {'write_only': True},\n 'common_name': {'required': False}}\n read_only_fields = ['expires', 'created', 'updated']\n\n\nclass PushSerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.Push` model.\"\"\"\n\n app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())\n owner = serializers.ReadOnlyField(source='owner.username')\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n\n class Meta:\n \"\"\"Metadata options for a :class:`PushSerializer`.\"\"\"\n model = models.Push\n fields = ['uuid', 'owner', 'app', 'sha', 'fingerprint', 'receive_user', 'receive_repo',\n 'ssh_connection', 'ssh_original_command', 'created', 'updated']\n", "path": "controller/api/serializers.py"}]} | 3,976 | 597 |
gh_patches_debug_1424 | rasdani/github-patches | git_diff | cookiecutter__cookiecutter-1891 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
2.2.0 CLI reports version 2.1.2dev0
* Cookiecutter version: 2.2.0 (or 2.1.2dev0, depending on who you ask)
* Template project url: n/a
* Python version: 3.11
* Operating System: linux
### Description:
Get the accurate version of cookiecutter from the CLI
### What I've run:
```bash
cookiecutter --version
Cookiecutter 2.1.2.dev0 from $PREFIX/lib/python3.11/site-packages (Python 3.11.4 | packaged by conda-forge | (main, Jun 10 2023, 18:08:17) [GCC 12.2.0])
```
This would be a one-line fix, but ideally the version would always be sourced from exactly one place:
- `setup.py` and `importlib_metadata`
- `__init__.py`
- a `VERSION` file
</issue>
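To illustrate the single-source idea (an assumed approach, not the repository's current layout): the installed distribution metadata can serve as the one authoritative version, with everything else reading from it.

```python
# Hypothetical sketch, e.g. in cookiecutter/__init__.py: the value comes from
# the installed package metadata rather than a second hard-coded string that
# can drift out of sync with setup.py.
from importlib.metadata import PackageNotFoundError, version

try:
    __version__ = version("cookiecutter")
except PackageNotFoundError:
    # Running from an uninstalled source checkout; fall back to a placeholder.
    __version__ = "0.0.0.dev0"
```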
<code>
[start of setup.py]
1 """cookiecutter distutils configuration."""
2 from setuptools import setup
3
4 version = "2.2.2.dev0"
5
6 with open('README.md', encoding='utf-8') as readme_file:
7 readme = readme_file.read()
8
9 requirements = [
10 'binaryornot>=0.4.4',
11 'Jinja2>=2.7,<4.0.0',
12 'click>=7.0,<9.0.0',
13 'pyyaml>=5.3.1',
14 'python-slugify>=4.0.0',
15 'requests>=2.23.0',
16 'arrow',
17 ]
18
19 setup(
20 name='cookiecutter',
21 version=version,
22 description=(
23 'A command-line utility that creates projects from project '
24 'templates, e.g. creating a Python package project from a '
25 'Python package project template.'
26 ),
27 long_description=readme,
28 long_description_content_type='text/markdown',
29 author='Audrey Feldroy',
30 author_email='[email protected]',
31 url='https://github.com/cookiecutter/cookiecutter',
32 project_urls={
33 "Documentation": "https://cookiecutter.readthedocs.io",
34 "Issues": "https://github.com/cookiecutter/cookiecutter/issues",
35 "Discord": "https://discord.gg/9BrxzPKuEW",
36 },
37 packages=['cookiecutter'],
38 package_dir={'cookiecutter': 'cookiecutter'},
39 entry_points={'console_scripts': ['cookiecutter = cookiecutter.__main__:main']},
40 include_package_data=True,
41 python_requires='>=3.7',
42 install_requires=requirements,
43 license='BSD',
44 zip_safe=False,
45 classifiers=[
46 "Development Status :: 5 - Production/Stable",
47 "Environment :: Console",
48 "Intended Audience :: Developers",
49 "Natural Language :: English",
50 "License :: OSI Approved :: BSD License",
51 "Programming Language :: Python :: 3 :: Only",
52 "Programming Language :: Python :: 3",
53 "Programming Language :: Python :: 3.7",
54 "Programming Language :: Python :: 3.8",
55 "Programming Language :: Python :: 3.9",
56 "Programming Language :: Python :: 3.10",
57 "Programming Language :: Python :: 3.11",
58 "Programming Language :: Python :: Implementation :: CPython",
59 "Programming Language :: Python :: Implementation :: PyPy",
60 "Programming Language :: Python",
61 "Topic :: Software Development",
62 ],
63 keywords=[
64 "cookiecutter",
65 "Python",
66 "projects",
67 "project templates",
68 "Jinja2",
69 "skeleton",
70 "scaffolding",
71 "project directory",
72 "package",
73 "packaging",
74 ],
75 )
76
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,7 +1,7 @@
"""cookiecutter distutils configuration."""
from setuptools import setup
-version = "2.2.2.dev0"
+version = "2.2.2"
with open('README.md', encoding='utf-8') as readme_file:
readme = readme_file.read()
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,7 +1,7 @@\n \"\"\"cookiecutter distutils configuration.\"\"\"\n from setuptools import setup\n \n-version = \"2.2.2.dev0\"\n+version = \"2.2.2\"\n \n with open('README.md', encoding='utf-8') as readme_file:\n readme = readme_file.read()\n", "issue": "2.2.0 CLI reports version 2.1.2dev0 \n* Cookiecutter version: 2.2.0 (or 2.1.2dev0, depending on who you ask)\r\n* Template project url: n/a\r\n* Python version: 3.11\r\n* Operating System: linux\r\n\r\n### Description:\r\n\r\nGet the accurate version of cookiecutter from the CLI\r\n\r\n### What I've run:\r\n\r\n```bash\r\ncookiecutter --version\r\nCookiecutter 2.1.2.dev0 from $PREFIX/lib/python3.11/site-packages (Python 3.11.4 | packaged by conda-forge | (main, Jun 10 2023, 18:08:17) [GCC 12.2.0])\r\n```\r\n\r\nWould be a one-line fix, but ideally would be always be sourced from exactly one place:\r\n- `setup.py` and `importlib_metadata`\r\n- `__init__.py`\r\n- a `VERSION` file\n", "before_files": [{"content": "\"\"\"cookiecutter distutils configuration.\"\"\"\nfrom setuptools import setup\n\nversion = \"2.2.2.dev0\"\n\nwith open('README.md', encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\nrequirements = [\n 'binaryornot>=0.4.4',\n 'Jinja2>=2.7,<4.0.0',\n 'click>=7.0,<9.0.0',\n 'pyyaml>=5.3.1',\n 'python-slugify>=4.0.0',\n 'requests>=2.23.0',\n 'arrow',\n]\n\nsetup(\n name='cookiecutter',\n version=version,\n description=(\n 'A command-line utility that creates projects from project '\n 'templates, e.g. creating a Python package project from a '\n 'Python package project template.'\n ),\n long_description=readme,\n long_description_content_type='text/markdown',\n author='Audrey Feldroy',\n author_email='[email protected]',\n url='https://github.com/cookiecutter/cookiecutter',\n project_urls={\n \"Documentation\": \"https://cookiecutter.readthedocs.io\",\n \"Issues\": \"https://github.com/cookiecutter/cookiecutter/issues\",\n \"Discord\": \"https://discord.gg/9BrxzPKuEW\",\n },\n packages=['cookiecutter'],\n package_dir={'cookiecutter': 'cookiecutter'},\n entry_points={'console_scripts': ['cookiecutter = cookiecutter.__main__:main']},\n include_package_data=True,\n python_requires='>=3.7',\n install_requires=requirements,\n license='BSD',\n zip_safe=False,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Natural Language :: English\",\n \"License :: OSI Approved :: BSD License\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Programming Language :: Python\",\n \"Topic :: Software Development\",\n ],\n keywords=[\n \"cookiecutter\",\n \"Python\",\n \"projects\",\n \"project templates\",\n \"Jinja2\",\n \"skeleton\",\n \"scaffolding\",\n \"project directory\",\n \"package\",\n \"packaging\",\n ],\n)\n", "path": "setup.py"}]} | 1,484 | 88 |
gh_patches_debug_9792 | rasdani/github-patches | git_diff | fossasia__open-event-server-5593 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Users are given various roles when they are deleted
**Describe the bug**
Users are given various roles when they are deleted
**Expected behavior**
<!-- A clear and concise description of what you expected to happen. -->
**Screenshots**

**Additional context**
I am working on this issue
</issue>
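A short note on the likely mechanism, based on the serializer code below: `before_update_object` toggles a role whenever `data.get('is_admin')` (and the analogous keys) differs from the stored flag, and a request that omits the field — presumably what the delete flow sends — makes `data.get(...)` return `None`, which always differs from `True`/`False` and therefore flips the role. A guard that only reacts to explicitly supplied values avoids that; the snippet is an illustrative sketch, not the project's exact patch:

```python
def should_toggle(requested, current):
    """Toggle only when the client explicitly sent a different value.

    `requested` stands for data.get('is_admin'); an omitted field yields
    None and must not count as "different from the current role".
    """
    return requested is not None and requested != current


assert should_toggle(None, True) is False   # field omitted: leave the role alone
assert should_toggle(False, True) is True   # explicit change requested
```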
<code>
[start of app/api/users.py]
1 import base64
2
3 from flask import Blueprint, request, jsonify, abort, make_response
4 from flask_jwt import current_identity as current_user
5 from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship
6 from sqlalchemy.orm.exc import NoResultFound
7 import urllib.error
8
9 from app import get_settings
10 from app.api.bootstrap import api
11 from app.api.helpers.db import safe_query, get_count
12 from app.api.helpers.exceptions import ConflictException, UnprocessableEntity, ForbiddenException
13 from app.api.helpers.files import create_save_image_sizes, make_frontend_url
14 from app.api.helpers.mail import send_email_confirmation, send_email_change_user_email, send_email_with_action
15 from app.api.helpers.permission_manager import has_access
16 from app.api.helpers.permissions import is_user_itself
17 from app.api.helpers.utilities import get_serializer, str_generator
18 from app.api.schema.users import UserSchema, UserSchemaPublic
19 from app.models import db
20 from app.models.access_code import AccessCode
21 from app.models.discount_code import DiscountCode
22 from app.models.email_notification import EmailNotification
23 from app.models.event_invoice import EventInvoice
24 from app.models.feedback import Feedback
25 from app.models.mail import USER_REGISTER_WITH_PASSWORD, PASSWORD_RESET_AND_VERIFY
26 from app.models.notification import Notification
27 from app.models.session import Session
28 from app.models.speaker import Speaker
29 from app.models.ticket_holder import TicketHolder
30 from app.models.user import User
31 from app.models.users_events_role import UsersEventsRoles
32
33 user_misc_routes = Blueprint('user_misc', __name__, url_prefix='/v1')
34
35
36 class UserList(ResourceList):
37 """
38 List and create Users
39 """
40
41 def before_create_object(self, data, view_kwargs):
42 """
43 method to check if there is an existing user with same email which is received in data to create a new user
44 :param data:
45 :param view_kwargs:
46 :return:
47 """
48 if db.session.query(User.id).filter_by(email=data['email']).scalar() is not None:
49 raise ConflictException({'pointer': '/data/attributes/email'}, "Email already exists")
50
51 def after_create_object(self, user, data, view_kwargs):
52 """
53 method to send-
54 email notification
55 mail link for register verification
56 add image urls
57 :param user:
58 :param data:
59 :param view_kwargs:
60 :return:
61 """
62
63 if user.was_registered_with_order:
64 link = make_frontend_url('/reset-password', {'token': user.reset_password})
65 send_email_with_action(user, PASSWORD_RESET_AND_VERIFY, app_name=get_settings()['app_name'],
66 email=user.email, link=link)
67 else:
68 s = get_serializer()
69 hash = str(base64.b64encode(str(s.dumps([user.email, str_generator()])).encode()), 'utf-8')
70 link = make_frontend_url('/verify'.format(id=user.id), {'token': hash})
71 send_email_with_action(user, USER_REGISTER_WITH_PASSWORD, app_name=get_settings()['app_name'],
72 email=user.email)
73 send_email_confirmation(user.email, link)
74
75 if data.get('original_image_url'):
76 try:
77 uploaded_images = create_save_image_sizes(data['original_image_url'], 'speaker-image', user.id)
78 except (urllib.error.HTTPError, urllib.error.URLError):
79 raise UnprocessableEntity(
80 {'source': 'attributes/original-image-url'}, 'Invalid Image URL'
81 )
82 uploaded_images['small_image_url'] = uploaded_images['thumbnail_image_url']
83 del uploaded_images['large_image_url']
84 self.session.query(User).filter_by(id=user.id).update(uploaded_images)
85
86 decorators = (api.has_permission('is_admin', methods="GET"),)
87 schema = UserSchema
88 data_layer = {'session': db.session,
89 'model': User,
90 'methods': {
91 'before_create_object': before_create_object,
92 'after_create_object': after_create_object
93 }}
94
95
96 class UserDetail(ResourceDetail):
97 """
98 User detail by id
99 """
100
101 def before_get(self, args, kwargs):
102
103 if current_user.is_admin or current_user.is_super_admin or current_user:
104 self.schema = UserSchema
105 else:
106 self.schema = UserSchemaPublic
107
108 def before_get_object(self, view_kwargs):
109 """
110 before get method for user object
111 :param view_kwargs:
112 :return:
113 """
114 if view_kwargs.get('notification_id') is not None:
115 notification = safe_query(self, Notification, 'id', view_kwargs['notification_id'], 'notification_id')
116 if notification.user_id is not None:
117 view_kwargs['id'] = notification.user_id
118 else:
119 view_kwargs['id'] = None
120
121 if view_kwargs.get('feedback_id') is not None:
122 feedback = safe_query(self, Feedback, 'id', view_kwargs['feedback_id'], 'feedback_id')
123 if feedback.user_id is not None:
124 view_kwargs['id'] = feedback.user_id
125 else:
126 view_kwargs['id'] = None
127
128 if view_kwargs.get('attendee_id') is not None:
129 attendee = safe_query(self, TicketHolder, 'id', view_kwargs['attendee_id'], 'attendee_id')
130 if attendee.user is not None:
131 if (not has_access('is_user_itself',
132 user_id=attendee.user.id) or not has_access('is_coorganizer',
133 event_id=attendee.event_id)):
134 raise ForbiddenException({'source': ''}, 'Access Forbidden')
135 view_kwargs['id'] = attendee.user.id
136 else:
137 view_kwargs['id'] = None
138
139 if view_kwargs.get('event_invoice_id') is not None:
140 event_invoice = safe_query(self, EventInvoice, 'id', view_kwargs['event_invoice_id'], 'event_invoice_id')
141 if event_invoice.user_id is not None:
142 view_kwargs['id'] = event_invoice.user_id
143 else:
144 view_kwargs['id'] = None
145
146 if view_kwargs.get('users_events_role_id') is not None:
147 users_events_role = safe_query(self, UsersEventsRoles, 'id', view_kwargs['users_events_role_id'],
148 'users_events_role_id')
149 if users_events_role.user_id is not None:
150 view_kwargs['id'] = users_events_role.user_id
151 else:
152 view_kwargs['id'] = None
153
154 if view_kwargs.get('speaker_id') is not None:
155 speaker = safe_query(self, Speaker, 'id', view_kwargs['speaker_id'], 'speaker_id')
156 if speaker.user_id is not None:
157 view_kwargs['id'] = speaker.user_id
158 else:
159 view_kwargs['id'] = None
160
161 if view_kwargs.get('session_id') is not None:
162 session = safe_query(self, Session, 'id', view_kwargs['session_id'], 'session_id')
163 if session.creator_id is not None:
164 view_kwargs['id'] = session.creator_id
165 else:
166 view_kwargs['id'] = None
167
168 if view_kwargs.get('access_code_id') is not None:
169 access_code = safe_query(self, AccessCode, 'id', view_kwargs['access_code_id'], 'access_code_id')
170 if access_code.marketer_id is not None:
171 view_kwargs['id'] = access_code.marketer_id
172 else:
173 view_kwargs['id'] = None
174
175 if view_kwargs.get('discount_code_id') is not None:
176 discount_code = safe_query(self, DiscountCode, 'id', view_kwargs['discount_code_id'], 'discount_code_id')
177 if discount_code.marketer_id is not None:
178 view_kwargs['id'] = discount_code.marketer_id
179 else:
180 view_kwargs['id'] = None
181
182 if view_kwargs.get('email_notification_id') is not None:
183 email_notification = safe_query(self, EmailNotification, 'id', view_kwargs['email_notification_id'],
184 'email_notification_id')
185 if email_notification.user_id is not None:
186 view_kwargs['id'] = email_notification.user_id
187 else:
188 view_kwargs['id'] = None
189
190 def before_update_object(self, user, data, view_kwargs):
191 if data.get('original_image_url') and data['original_image_url'] != user.original_image_url:
192 try:
193 uploaded_images = create_save_image_sizes(data['original_image_url'], 'speaker-image', user.id)
194 except (urllib.error.HTTPError, urllib.error.URLError):
195 raise UnprocessableEntity(
196 {'source': 'attributes/original-image-url'}, 'Invalid Image URL'
197 )
198 data['original_image_url'] = uploaded_images['original_image_url']
199 data['small_image_url'] = uploaded_images['thumbnail_image_url']
200 data['thumbnail_image_url'] = uploaded_images['thumbnail_image_url']
201 data['icon_image_url'] = uploaded_images['icon_image_url']
202
203 if data.get('email') and data['email'] != user.email:
204 try:
205 db.session.query(User).filter_by(email=data['email']).one()
206 except NoResultFound:
207 view_kwargs['email_changed'] = user.email
208 else:
209 raise ConflictException({'pointer': '/data/attributes/email'}, "Email already exists")
210
211 if has_access('is_super_admin') and data.get('is_admin') != user.is_admin:
212 user.is_admin = not user.is_admin
213
214 if has_access('is_admin') and data.get('is_sales_admin') != user.is_sales_admin:
215 user.is_sales_admin = not user.is_sales_admin
216
217 if has_access('is_admin') and data.get('is_marketer') != user.is_marketer:
218 user.is_marketer = not user.is_marketer
219
220 def after_update_object(self, user, data, view_kwargs):
221 """
222 method to mail user about email change
223 :param user:
224 :param data:
225 :param view_kwargs:
226 :return:
227 """
228 if view_kwargs.get('email_changed'):
229 send_email_change_user_email(user, view_kwargs.get('email_changed'))
230
231 decorators = (api.has_permission('is_user_itself', fetch="user_id,id", fetch_as="user_id",
232 model=[Notification, Feedback, UsersEventsRoles, Session, EventInvoice, AccessCode,
233 DiscountCode, EmailNotification, Speaker, User],
234 fetch_key_url="notification_id, feedback_id, users_events_role_id, session_id, \
235 event_invoice_id, access_code_id, discount_code_id, email_notification_id, speaker_id, id",
236 leave_if=lambda a: a.get('attendee_id')),)
237 schema = UserSchema
238 data_layer = {'session': db.session,
239 'model': User,
240 'methods': {
241 'before_get_object': before_get_object,
242 'before_update_object': before_update_object,
243 'after_update_object': after_update_object
244 }}
245
246
247 class UserRelationship(ResourceRelationship):
248 """
249 User Relationship
250 """
251 decorators = (is_user_itself,)
252 schema = UserSchema
253 data_layer = {'session': db.session,
254 'model': User}
255
256
257 @user_misc_routes.route('/users/checkEmail', methods=['POST'])
258 def is_email_available():
259 email = request.json.get('email', None)
260 if email:
261 if get_count(db.session.query(User).filter_by(email=email)):
262 return jsonify(
263 result="False"
264 )
265 else:
266 return jsonify(
267 result="True"
268 )
269 else:
270 abort(
271 make_response(jsonify(error="Email field missing"), 422)
272 )
273
[end of app/api/users.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/api/users.py b/app/api/users.py
--- a/app/api/users.py
+++ b/app/api/users.py
@@ -208,7 +208,7 @@
else:
raise ConflictException({'pointer': '/data/attributes/email'}, "Email already exists")
- if has_access('is_super_admin') and data.get('is_admin') != user.is_admin:
+ if has_access('is_super_admin') and data.get('is_admin') and data.get('is_admin') != user.is_admin:
user.is_admin = not user.is_admin
if has_access('is_admin') and data.get('is_sales_admin') != user.is_sales_admin:
| {"golden_diff": "diff --git a/app/api/users.py b/app/api/users.py\n--- a/app/api/users.py\n+++ b/app/api/users.py\n@@ -208,7 +208,7 @@\n else:\n raise ConflictException({'pointer': '/data/attributes/email'}, \"Email already exists\")\n \n- if has_access('is_super_admin') and data.get('is_admin') != user.is_admin:\n+ if has_access('is_super_admin') and data.get('is_admin') and data.get('is_admin') != user.is_admin:\n user.is_admin = not user.is_admin\n \n if has_access('is_admin') and data.get('is_sales_admin') != user.is_sales_admin:\n", "issue": "Users are given various roles when they are deleted\n**Describe the bug**\r\nUsers are given various roles when they are deleted\r\n\r\n**Expected behavior**\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\n**Screenshots**\r\n\r\n\r\n**Additional context**\r\nI am working on this issue\n", "before_files": [{"content": "import base64\n\nfrom flask import Blueprint, request, jsonify, abort, make_response\nfrom flask_jwt import current_identity as current_user\nfrom flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\nfrom sqlalchemy.orm.exc import NoResultFound\nimport urllib.error\n\nfrom app import get_settings\nfrom app.api.bootstrap import api\nfrom app.api.helpers.db import safe_query, get_count\nfrom app.api.helpers.exceptions import ConflictException, UnprocessableEntity, ForbiddenException\nfrom app.api.helpers.files import create_save_image_sizes, make_frontend_url\nfrom app.api.helpers.mail import send_email_confirmation, send_email_change_user_email, send_email_with_action\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.permissions import is_user_itself\nfrom app.api.helpers.utilities import get_serializer, str_generator\nfrom app.api.schema.users import UserSchema, UserSchemaPublic\nfrom app.models import db\nfrom app.models.access_code import AccessCode\nfrom app.models.discount_code import DiscountCode\nfrom app.models.email_notification import EmailNotification\nfrom app.models.event_invoice import EventInvoice\nfrom app.models.feedback import Feedback\nfrom app.models.mail import USER_REGISTER_WITH_PASSWORD, PASSWORD_RESET_AND_VERIFY\nfrom app.models.notification import Notification\nfrom app.models.session import Session\nfrom app.models.speaker import Speaker\nfrom app.models.ticket_holder import TicketHolder\nfrom app.models.user import User\nfrom app.models.users_events_role import UsersEventsRoles\n\nuser_misc_routes = Blueprint('user_misc', __name__, url_prefix='/v1')\n\n\nclass UserList(ResourceList):\n \"\"\"\n List and create Users\n \"\"\"\n\n def before_create_object(self, data, view_kwargs):\n \"\"\"\n method to check if there is an existing user with same email which is received in data to create a new user\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if db.session.query(User.id).filter_by(email=data['email']).scalar() is not None:\n raise ConflictException({'pointer': '/data/attributes/email'}, \"Email already exists\")\n\n def after_create_object(self, user, data, view_kwargs):\n \"\"\"\n method to send-\n email notification\n mail link for register verification\n add image urls\n :param user:\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n\n if user.was_registered_with_order:\n link = make_frontend_url('/reset-password', {'token': user.reset_password})\n send_email_with_action(user, PASSWORD_RESET_AND_VERIFY, app_name=get_settings()['app_name'],\n email=user.email, link=link)\n else:\n s = 
get_serializer()\n hash = str(base64.b64encode(str(s.dumps([user.email, str_generator()])).encode()), 'utf-8')\n link = make_frontend_url('/verify'.format(id=user.id), {'token': hash})\n send_email_with_action(user, USER_REGISTER_WITH_PASSWORD, app_name=get_settings()['app_name'],\n email=user.email)\n send_email_confirmation(user.email, link)\n\n if data.get('original_image_url'):\n try:\n uploaded_images = create_save_image_sizes(data['original_image_url'], 'speaker-image', user.id)\n except (urllib.error.HTTPError, urllib.error.URLError):\n raise UnprocessableEntity(\n {'source': 'attributes/original-image-url'}, 'Invalid Image URL'\n )\n uploaded_images['small_image_url'] = uploaded_images['thumbnail_image_url']\n del uploaded_images['large_image_url']\n self.session.query(User).filter_by(id=user.id).update(uploaded_images)\n\n decorators = (api.has_permission('is_admin', methods=\"GET\"),)\n schema = UserSchema\n data_layer = {'session': db.session,\n 'model': User,\n 'methods': {\n 'before_create_object': before_create_object,\n 'after_create_object': after_create_object\n }}\n\n\nclass UserDetail(ResourceDetail):\n \"\"\"\n User detail by id\n \"\"\"\n\n def before_get(self, args, kwargs):\n\n if current_user.is_admin or current_user.is_super_admin or current_user:\n self.schema = UserSchema\n else:\n self.schema = UserSchemaPublic\n\n def before_get_object(self, view_kwargs):\n \"\"\"\n before get method for user object\n :param view_kwargs:\n :return:\n \"\"\"\n if view_kwargs.get('notification_id') is not None:\n notification = safe_query(self, Notification, 'id', view_kwargs['notification_id'], 'notification_id')\n if notification.user_id is not None:\n view_kwargs['id'] = notification.user_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('feedback_id') is not None:\n feedback = safe_query(self, Feedback, 'id', view_kwargs['feedback_id'], 'feedback_id')\n if feedback.user_id is not None:\n view_kwargs['id'] = feedback.user_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('attendee_id') is not None:\n attendee = safe_query(self, TicketHolder, 'id', view_kwargs['attendee_id'], 'attendee_id')\n if attendee.user is not None:\n if (not has_access('is_user_itself',\n user_id=attendee.user.id) or not has_access('is_coorganizer',\n event_id=attendee.event_id)):\n raise ForbiddenException({'source': ''}, 'Access Forbidden')\n view_kwargs['id'] = attendee.user.id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('event_invoice_id') is not None:\n event_invoice = safe_query(self, EventInvoice, 'id', view_kwargs['event_invoice_id'], 'event_invoice_id')\n if event_invoice.user_id is not None:\n view_kwargs['id'] = event_invoice.user_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('users_events_role_id') is not None:\n users_events_role = safe_query(self, UsersEventsRoles, 'id', view_kwargs['users_events_role_id'],\n 'users_events_role_id')\n if users_events_role.user_id is not None:\n view_kwargs['id'] = users_events_role.user_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('speaker_id') is not None:\n speaker = safe_query(self, Speaker, 'id', view_kwargs['speaker_id'], 'speaker_id')\n if speaker.user_id is not None:\n view_kwargs['id'] = speaker.user_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('session_id') is not None:\n session = safe_query(self, Session, 'id', view_kwargs['session_id'], 'session_id')\n if session.creator_id is not None:\n view_kwargs['id'] = session.creator_id\n else:\n view_kwargs['id'] = 
None\n\n if view_kwargs.get('access_code_id') is not None:\n access_code = safe_query(self, AccessCode, 'id', view_kwargs['access_code_id'], 'access_code_id')\n if access_code.marketer_id is not None:\n view_kwargs['id'] = access_code.marketer_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('discount_code_id') is not None:\n discount_code = safe_query(self, DiscountCode, 'id', view_kwargs['discount_code_id'], 'discount_code_id')\n if discount_code.marketer_id is not None:\n view_kwargs['id'] = discount_code.marketer_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('email_notification_id') is not None:\n email_notification = safe_query(self, EmailNotification, 'id', view_kwargs['email_notification_id'],\n 'email_notification_id')\n if email_notification.user_id is not None:\n view_kwargs['id'] = email_notification.user_id\n else:\n view_kwargs['id'] = None\n\n def before_update_object(self, user, data, view_kwargs):\n if data.get('original_image_url') and data['original_image_url'] != user.original_image_url:\n try:\n uploaded_images = create_save_image_sizes(data['original_image_url'], 'speaker-image', user.id)\n except (urllib.error.HTTPError, urllib.error.URLError):\n raise UnprocessableEntity(\n {'source': 'attributes/original-image-url'}, 'Invalid Image URL'\n )\n data['original_image_url'] = uploaded_images['original_image_url']\n data['small_image_url'] = uploaded_images['thumbnail_image_url']\n data['thumbnail_image_url'] = uploaded_images['thumbnail_image_url']\n data['icon_image_url'] = uploaded_images['icon_image_url']\n\n if data.get('email') and data['email'] != user.email:\n try:\n db.session.query(User).filter_by(email=data['email']).one()\n except NoResultFound:\n view_kwargs['email_changed'] = user.email\n else:\n raise ConflictException({'pointer': '/data/attributes/email'}, \"Email already exists\")\n\n if has_access('is_super_admin') and data.get('is_admin') != user.is_admin:\n user.is_admin = not user.is_admin\n\n if has_access('is_admin') and data.get('is_sales_admin') != user.is_sales_admin:\n user.is_sales_admin = not user.is_sales_admin\n\n if has_access('is_admin') and data.get('is_marketer') != user.is_marketer:\n user.is_marketer = not user.is_marketer\n\n def after_update_object(self, user, data, view_kwargs):\n \"\"\"\n method to mail user about email change\n :param user:\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if view_kwargs.get('email_changed'):\n send_email_change_user_email(user, view_kwargs.get('email_changed'))\n\n decorators = (api.has_permission('is_user_itself', fetch=\"user_id,id\", fetch_as=\"user_id\",\n model=[Notification, Feedback, UsersEventsRoles, Session, EventInvoice, AccessCode,\n DiscountCode, EmailNotification, Speaker, User],\n fetch_key_url=\"notification_id, feedback_id, users_events_role_id, session_id, \\\n event_invoice_id, access_code_id, discount_code_id, email_notification_id, speaker_id, id\",\n leave_if=lambda a: a.get('attendee_id')),)\n schema = UserSchema\n data_layer = {'session': db.session,\n 'model': User,\n 'methods': {\n 'before_get_object': before_get_object,\n 'before_update_object': before_update_object,\n 'after_update_object': after_update_object\n }}\n\n\nclass UserRelationship(ResourceRelationship):\n \"\"\"\n User Relationship\n \"\"\"\n decorators = (is_user_itself,)\n schema = UserSchema\n data_layer = {'session': db.session,\n 'model': User}\n\n\n@user_misc_routes.route('/users/checkEmail', methods=['POST'])\ndef is_email_available():\n email = 
request.json.get('email', None)\n if email:\n if get_count(db.session.query(User).filter_by(email=email)):\n return jsonify(\n result=\"False\"\n )\n else:\n return jsonify(\n result=\"True\"\n )\n else:\n abort(\n make_response(jsonify(error=\"Email field missing\"), 422)\n )\n", "path": "app/api/users.py"}]} | 3,780 | 147 |
gh_patches_debug_62 | rasdani/github-patches | git_diff | edgedb__edgedb-2139 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Better syntax errors for substitution tokens
Currently as of 1.0-alpha.8+dev.5341.g66ec73494 it fails with InternalServerError:
```
edgedb> SELECT \(x);
ERROR: InternalServerError: (<class 'edb.edgeql.parser.grammar.tokens.TokenMeta'>, 'SUBSTITUTION')
Hint: This is most likely a bug in EdgeDB. Please consider opening an issue ticket at https://github.com/edgedb/edgedb/issues/new?template=bug_report.md
Server traceback:
Traceback (most recent call last):
File "/work/edb/server/procpool/worker.py", line 75, in worker
res = await meth(*args)
File "/work/edb/server/compiler/compiler.py", line 1935, in compile
return self._compile(ctx=ctx, source=source)
File "/work/edb/server/compiler/compiler.py", line 1487, in _compile
return self._try_compile(ctx=ctx, source=source)
File "/work/edb/server/compiler/compiler.py", line 1519, in _try_compile
statements = edgeql.parse_block(source)
File "/work/edb/edgeql/parser/__init__.py", line 69, in parse_block
return parser.parse(source)
File "/work/edb/common/parsing.py", line 401, in parse
token = self.process_lex_token(mod, tok)
File "/work/edb/common/parsing.py", line 390, in process_lex_token
return mod.TokenMeta.for_lex_token(tok.kind())(
File "/work/edb/common/parsing.py", line 100, in for_lex_token
return mcls.token_map[mcls, token]
KeyError: (<class 'edb.edgeql.parser.grammar.tokens.TokenMeta'>, 'SUBSTITUTION')
```
This bug appeared after #2131
</issue>
<code>
[start of edb/edgeql/parser/grammar/tokens.py]
1 #
2 # This source file is part of the EdgeDB open source project.
3 #
4 # Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License");
7 # you may not use this file except in compliance with the License.
8 # You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS,
14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 # See the License for the specific language governing permissions and
16 # limitations under the License.
17 #
18
19
20 from __future__ import annotations
21
22 import re
23 import sys
24 import types
25
26 from edb.common import parsing
27
28 from . import keywords
29 from . import precedence
30 from . import lexer
31
32
33 clean_string = re.compile(r"'(?:\s|\n)+'")
34 string_quote = re.compile(lexer.re_dquote)
35
36
37 class TokenMeta(parsing.TokenMeta):
38 pass
39
40
41 class Token(parsing.Token, metaclass=TokenMeta,
42 precedence_class=precedence.PrecedenceMeta):
43 pass
44
45
46 class T_DOT(Token, lextoken='.'):
47 pass
48
49
50 class T_DOTBW(Token, lextoken='.<'):
51 pass
52
53
54 class T_LBRACKET(Token, lextoken='['):
55 pass
56
57
58 class T_RBRACKET(Token, lextoken=']'):
59 pass
60
61
62 class T_LPAREN(Token, lextoken='('):
63 pass
64
65
66 class T_RPAREN(Token, lextoken=')'):
67 pass
68
69
70 class T_LBRACE(Token, lextoken='{'):
71 pass
72
73
74 class T_RBRACE(Token, lextoken='}'):
75 pass
76
77
78 class T_DOUBLECOLON(Token, lextoken='::'):
79 pass
80
81
82 class T_DOUBLEQMARK(Token, lextoken='??'):
83 pass
84
85
86 class T_COLON(Token, lextoken=':'):
87 pass
88
89
90 class T_SEMICOLON(Token, lextoken=';'):
91 pass
92
93
94 class T_COMMA(Token, lextoken=','):
95 pass
96
97
98 class T_PLUS(Token, lextoken='+'):
99 pass
100
101
102 class T_DOUBLEPLUS(Token, lextoken='++'):
103 pass
104
105
106 class T_MINUS(Token, lextoken='-'):
107 pass
108
109
110 class T_STAR(Token, lextoken='*'):
111 pass
112
113
114 class T_SLASH(Token, lextoken='/'):
115 pass
116
117
118 class T_DOUBLESLASH(Token, lextoken='//'):
119 pass
120
121
122 class T_PERCENT(Token, lextoken='%'):
123 pass
124
125
126 class T_CIRCUMFLEX(Token, lextoken='^'):
127 pass
128
129
130 class T_AT(Token, lextoken='@'):
131 pass
132
133
134 class T_ARGUMENT(Token):
135 pass
136
137
138 class T_ASSIGN(Token):
139 pass
140
141
142 class T_ADDASSIGN(Token):
143 pass
144
145
146 class T_REMASSIGN(Token):
147 pass
148
149
150 class T_ARROW(Token):
151 pass
152
153
154 class T_LANGBRACKET(Token, lextoken='<'):
155 pass
156
157
158 class T_RANGBRACKET(Token, lextoken='>'):
159 pass
160
161
162 class T_EQUALS(Token, lextoken='='):
163 pass
164
165
166 class T_AMPER(Token, lextoken='&'):
167 pass
168
169
170 class T_PIPE(Token, lextoken='|'):
171 pass
172
173
174 class T_NAMEDONLY(Token):
175 pass
176
177
178 class T_SETANNOTATION(Token):
179 pass
180
181
182 class T_SETTYPE(Token):
183 pass
184
185
186 class T_ICONST(Token):
187 pass
188
189
190 class T_NICONST(Token):
191 pass
192
193
194 class T_FCONST(Token):
195 pass
196
197
198 class T_NFCONST(Token):
199 pass
200
201
202 class T_BCONST(Token):
203 pass
204
205
206 class T_SCONST(Token):
207 pass
208
209
210 class T_RSCONST(Token):
211 pass
212
213
214 class T_IDENT(Token):
215 pass
216
217
218 class T_OP(Token):
219 pass
220
221
222 class T_EOF(Token):
223 pass
224
225
226 def _gen_keyword_tokens():
227 # Define keyword tokens
228
229 mod = sys.modules[__name__]
230
231 def clsexec(ns):
232 ns['__module__'] = __name__
233 return ns
234
235 for token, _ in keywords.edgeql_keywords.values():
236 clsname = 'T_{}'.format(token)
237 clskwds = dict(metaclass=parsing.TokenMeta, token=token)
238 cls = types.new_class(clsname, (Token,), clskwds, clsexec)
239 setattr(mod, clsname, cls)
240
241
242 _gen_keyword_tokens()
243
[end of edb/edgeql/parser/grammar/tokens.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/edb/edgeql/parser/grammar/tokens.py b/edb/edgeql/parser/grammar/tokens.py
--- a/edb/edgeql/parser/grammar/tokens.py
+++ b/edb/edgeql/parser/grammar/tokens.py
@@ -219,6 +219,10 @@
pass
+class T_SUBSTITUTION(Token):
+ pass
+
+
class T_EOF(Token):
pass
| {"golden_diff": "diff --git a/edb/edgeql/parser/grammar/tokens.py b/edb/edgeql/parser/grammar/tokens.py\n--- a/edb/edgeql/parser/grammar/tokens.py\n+++ b/edb/edgeql/parser/grammar/tokens.py\n@@ -219,6 +219,10 @@\n pass\n \n \n+class T_SUBSTITUTION(Token):\n+ pass\n+\n+\n class T_EOF(Token):\n pass\n", "issue": "Better syntax errors for substitution tokens\nCurrently as of 1.0-alpha.8+dev.5341.g66ec73494 it fails with InternalServerError:\r\n```\r\nedgedb> SELECT \\(x);\r\nERROR: InternalServerError: (<class 'edb.edgeql.parser.grammar.tokens.TokenMeta'>, 'SUBSTITUTION')\r\n Hint: This is most likely a bug in EdgeDB. Please consider opening an issue ticket at https://github.com/edgedb/edgedb/issues/new?template=bug_report.md\r\n Server traceback:\r\n Traceback (most recent call last):\r\n File \"/work/edb/server/procpool/worker.py\", line 75, in worker\r\n res = await meth(*args)\r\n File \"/work/edb/server/compiler/compiler.py\", line 1935, in compile\r\n return self._compile(ctx=ctx, source=source)\r\n File \"/work/edb/server/compiler/compiler.py\", line 1487, in _compile\r\n return self._try_compile(ctx=ctx, source=source)\r\n File \"/work/edb/server/compiler/compiler.py\", line 1519, in _try_compile\r\n statements = edgeql.parse_block(source)\r\n File \"/work/edb/edgeql/parser/__init__.py\", line 69, in parse_block\r\n return parser.parse(source)\r\n File \"/work/edb/common/parsing.py\", line 401, in parse\r\n token = self.process_lex_token(mod, tok)\r\n File \"/work/edb/common/parsing.py\", line 390, in process_lex_token\r\n return mod.TokenMeta.for_lex_token(tok.kind())(\r\n File \"/work/edb/common/parsing.py\", line 100, in for_lex_token\r\n return mcls.token_map[mcls, token]\r\n KeyError: (<class 'edb.edgeql.parser.grammar.tokens.TokenMeta'>, 'SUBSTITUTION')\r\n```\r\nThis bug appeared after #2131 \n", "before_files": [{"content": "#\n# This source file is part of the EdgeDB open source project.\n#\n# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nfrom __future__ import annotations\n\nimport re\nimport sys\nimport types\n\nfrom edb.common import parsing\n\nfrom . import keywords\nfrom . import precedence\nfrom . 
import lexer\n\n\nclean_string = re.compile(r\"'(?:\\s|\\n)+'\")\nstring_quote = re.compile(lexer.re_dquote)\n\n\nclass TokenMeta(parsing.TokenMeta):\n pass\n\n\nclass Token(parsing.Token, metaclass=TokenMeta,\n precedence_class=precedence.PrecedenceMeta):\n pass\n\n\nclass T_DOT(Token, lextoken='.'):\n pass\n\n\nclass T_DOTBW(Token, lextoken='.<'):\n pass\n\n\nclass T_LBRACKET(Token, lextoken='['):\n pass\n\n\nclass T_RBRACKET(Token, lextoken=']'):\n pass\n\n\nclass T_LPAREN(Token, lextoken='('):\n pass\n\n\nclass T_RPAREN(Token, lextoken=')'):\n pass\n\n\nclass T_LBRACE(Token, lextoken='{'):\n pass\n\n\nclass T_RBRACE(Token, lextoken='}'):\n pass\n\n\nclass T_DOUBLECOLON(Token, lextoken='::'):\n pass\n\n\nclass T_DOUBLEQMARK(Token, lextoken='??'):\n pass\n\n\nclass T_COLON(Token, lextoken=':'):\n pass\n\n\nclass T_SEMICOLON(Token, lextoken=';'):\n pass\n\n\nclass T_COMMA(Token, lextoken=','):\n pass\n\n\nclass T_PLUS(Token, lextoken='+'):\n pass\n\n\nclass T_DOUBLEPLUS(Token, lextoken='++'):\n pass\n\n\nclass T_MINUS(Token, lextoken='-'):\n pass\n\n\nclass T_STAR(Token, lextoken='*'):\n pass\n\n\nclass T_SLASH(Token, lextoken='/'):\n pass\n\n\nclass T_DOUBLESLASH(Token, lextoken='//'):\n pass\n\n\nclass T_PERCENT(Token, lextoken='%'):\n pass\n\n\nclass T_CIRCUMFLEX(Token, lextoken='^'):\n pass\n\n\nclass T_AT(Token, lextoken='@'):\n pass\n\n\nclass T_ARGUMENT(Token):\n pass\n\n\nclass T_ASSIGN(Token):\n pass\n\n\nclass T_ADDASSIGN(Token):\n pass\n\n\nclass T_REMASSIGN(Token):\n pass\n\n\nclass T_ARROW(Token):\n pass\n\n\nclass T_LANGBRACKET(Token, lextoken='<'):\n pass\n\n\nclass T_RANGBRACKET(Token, lextoken='>'):\n pass\n\n\nclass T_EQUALS(Token, lextoken='='):\n pass\n\n\nclass T_AMPER(Token, lextoken='&'):\n pass\n\n\nclass T_PIPE(Token, lextoken='|'):\n pass\n\n\nclass T_NAMEDONLY(Token):\n pass\n\n\nclass T_SETANNOTATION(Token):\n pass\n\n\nclass T_SETTYPE(Token):\n pass\n\n\nclass T_ICONST(Token):\n pass\n\n\nclass T_NICONST(Token):\n pass\n\n\nclass T_FCONST(Token):\n pass\n\n\nclass T_NFCONST(Token):\n pass\n\n\nclass T_BCONST(Token):\n pass\n\n\nclass T_SCONST(Token):\n pass\n\n\nclass T_RSCONST(Token):\n pass\n\n\nclass T_IDENT(Token):\n pass\n\n\nclass T_OP(Token):\n pass\n\n\nclass T_EOF(Token):\n pass\n\n\ndef _gen_keyword_tokens():\n # Define keyword tokens\n\n mod = sys.modules[__name__]\n\n def clsexec(ns):\n ns['__module__'] = __name__\n return ns\n\n for token, _ in keywords.edgeql_keywords.values():\n clsname = 'T_{}'.format(token)\n clskwds = dict(metaclass=parsing.TokenMeta, token=token)\n cls = types.new_class(clsname, (Token,), clskwds, clsexec)\n setattr(mod, clsname, cls)\n\n\n_gen_keyword_tokens()\n", "path": "edb/edgeql/parser/grammar/tokens.py"}]} | 2,640 | 97 |
gh_patches_debug_33 | rasdani/github-patches | git_diff | googleapis__google-api-python-client-1864 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Stop using external 'mock' dependency
As of Python 3.4, 'mock' is included in the standard library under the unittest module, and since the lowest supported version of Python is greater than that, we can remove the external dependency.
</issue>
<code>
[start of noxfile.py]
1 # Copyright 2020 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16 import shutil
17
18 import nox
19
20 BLACK_VERSION = "black==22.3.0"
21 ISORT_VERSION = "isort==5.10.1"
22 BLACK_PATHS = [
23 "apiclient",
24 "googleapiclient",
25 "scripts",
26 "tests",
27 "describe.py",
28 "expandsymlinks.py",
29 "noxfile.py",
30 "owlbot.py",
31 "setup.py",
32 ]
33
34 test_dependencies = [
35 "django>=2.0.0",
36 "google-auth",
37 "google-auth-httplib2",
38 "mox",
39 "parameterized",
40 "pyopenssl",
41 "pytest",
42 "pytest-cov",
43 "webtest",
44 "coverage",
45 "mock",
46 ]
47
48
49 @nox.session(python=["3.7"])
50 def lint(session):
51 session.install("flake8")
52 session.run(
53 "flake8",
54 "googleapiclient",
55 "tests",
56 "--count",
57 "--select=E9,F63,F7,F82",
58 "--show-source",
59 "--statistics",
60 )
61
62
63 @nox.session(python="3.8")
64 def format(session):
65 """
66 Run isort to sort imports. Then run black
67 to format code to uniform standard.
68 """
69 session.install(BLACK_VERSION, ISORT_VERSION)
70 # Use the --fss option to sort imports using strict alphabetical order.
71 # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections
72 session.run(
73 "isort",
74 "--fss",
75 *BLACK_PATHS,
76 )
77 session.run(
78 "black",
79 *BLACK_PATHS,
80 )
81
82
83 @nox.session(python=["3.7", "3.8", "3.9", "3.10"])
84 @nox.parametrize(
85 "oauth2client",
86 [
87 "oauth2client<2dev",
88 "oauth2client>=2,<=3dev",
89 "oauth2client>=3,<=4dev",
90 "oauth2client>=4,<=5dev",
91 ],
92 )
93 def unit(session, oauth2client):
94 # Clean up dist and build folders
95 shutil.rmtree("dist", ignore_errors=True)
96 shutil.rmtree("build", ignore_errors=True)
97
98 session.install(*test_dependencies)
99 session.install(oauth2client)
100
101 # Create and install wheels
102 session.run("python3", "setup.py", "bdist_wheel")
103 session.install(os.path.join("dist", os.listdir("dist").pop()))
104
105 # Run tests from a different directory to test the package artifacts
106 root_dir = os.path.dirname(os.path.realpath(__file__))
107 temp_dir = session.create_tmp()
108 session.chdir(temp_dir)
109 shutil.copytree(os.path.join(root_dir, "tests"), "tests")
110
111 # Run py.test against the unit tests.
112 session.run(
113 "py.test",
114 "--quiet",
115 "--cov=googleapiclient",
116 "--cov=tests",
117 "--cov-append",
118 "--cov-config=.coveragerc",
119 "--cov-report=",
120 "--cov-fail-under=85",
121 "tests",
122 *session.posargs,
123 )
124
125
126 @nox.session(python=["3.9"])
127 def scripts(session):
128 session.install(*test_dependencies)
129 session.install("-e", ".")
130 session.install("-r", "scripts/requirements.txt")
131
132 # Run py.test against the unit tests.
133 session.run(
134 "py.test",
135 "--quiet",
136 "--cov=scripts",
137 "--cov-config=.coveragerc",
138 "--cov-report=",
139 "--cov-fail-under=91",
140 "scripts",
141 *session.posargs,
142 )
143
[end of noxfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/noxfile.py b/noxfile.py
--- a/noxfile.py
+++ b/noxfile.py
@@ -42,7 +42,6 @@
"pytest-cov",
"webtest",
"coverage",
- "mock",
]
| {"golden_diff": "diff --git a/noxfile.py b/noxfile.py\n--- a/noxfile.py\n+++ b/noxfile.py\n@@ -42,7 +42,6 @@\n \"pytest-cov\",\n \"webtest\",\n \"coverage\",\n- \"mock\",\n ]\n", "issue": "Stop using external 'mock' dependency\nAs of Python 3.4, 'mock' is included in the standard library under the unittest module, and since the lowest supported version of Python is greater than that, we can remove the external dependency.\n", "before_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport shutil\n\nimport nox\n\nBLACK_VERSION = \"black==22.3.0\"\nISORT_VERSION = \"isort==5.10.1\"\nBLACK_PATHS = [\n \"apiclient\",\n \"googleapiclient\",\n \"scripts\",\n \"tests\",\n \"describe.py\",\n \"expandsymlinks.py\",\n \"noxfile.py\",\n \"owlbot.py\",\n \"setup.py\",\n]\n\ntest_dependencies = [\n \"django>=2.0.0\",\n \"google-auth\",\n \"google-auth-httplib2\",\n \"mox\",\n \"parameterized\",\n \"pyopenssl\",\n \"pytest\",\n \"pytest-cov\",\n \"webtest\",\n \"coverage\",\n \"mock\",\n]\n\n\[email protected](python=[\"3.7\"])\ndef lint(session):\n session.install(\"flake8\")\n session.run(\n \"flake8\",\n \"googleapiclient\",\n \"tests\",\n \"--count\",\n \"--select=E9,F63,F7,F82\",\n \"--show-source\",\n \"--statistics\",\n )\n\n\[email protected](python=\"3.8\")\ndef format(session):\n \"\"\"\n Run isort to sort imports. 
Then run black\n to format code to uniform standard.\n \"\"\"\n session.install(BLACK_VERSION, ISORT_VERSION)\n # Use the --fss option to sort imports using strict alphabetical order.\n # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections\n session.run(\n \"isort\",\n \"--fss\",\n *BLACK_PATHS,\n )\n session.run(\n \"black\",\n *BLACK_PATHS,\n )\n\n\[email protected](python=[\"3.7\", \"3.8\", \"3.9\", \"3.10\"])\[email protected](\n \"oauth2client\",\n [\n \"oauth2client<2dev\",\n \"oauth2client>=2,<=3dev\",\n \"oauth2client>=3,<=4dev\",\n \"oauth2client>=4,<=5dev\",\n ],\n)\ndef unit(session, oauth2client):\n # Clean up dist and build folders\n shutil.rmtree(\"dist\", ignore_errors=True)\n shutil.rmtree(\"build\", ignore_errors=True)\n\n session.install(*test_dependencies)\n session.install(oauth2client)\n\n # Create and install wheels\n session.run(\"python3\", \"setup.py\", \"bdist_wheel\")\n session.install(os.path.join(\"dist\", os.listdir(\"dist\").pop()))\n\n # Run tests from a different directory to test the package artifacts\n root_dir = os.path.dirname(os.path.realpath(__file__))\n temp_dir = session.create_tmp()\n session.chdir(temp_dir)\n shutil.copytree(os.path.join(root_dir, \"tests\"), \"tests\")\n\n # Run py.test against the unit tests.\n session.run(\n \"py.test\",\n \"--quiet\",\n \"--cov=googleapiclient\",\n \"--cov=tests\",\n \"--cov-append\",\n \"--cov-config=.coveragerc\",\n \"--cov-report=\",\n \"--cov-fail-under=85\",\n \"tests\",\n *session.posargs,\n )\n\n\[email protected](python=[\"3.9\"])\ndef scripts(session):\n session.install(*test_dependencies)\n session.install(\"-e\", \".\")\n session.install(\"-r\", \"scripts/requirements.txt\")\n\n # Run py.test against the unit tests.\n session.run(\n \"py.test\",\n \"--quiet\",\n \"--cov=scripts\",\n \"--cov-config=.coveragerc\",\n \"--cov-report=\",\n \"--cov-fail-under=91\",\n \"scripts\",\n *session.posargs,\n )\n", "path": "noxfile.py"}]} | 1,833 | 61 |
gh_patches_debug_7786 | rasdani/github-patches | git_diff | Lightning-AI__pytorch-lightning-619 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Comet PAPI Depreciated
Use of the Comet API logger reports an unecessary depreciation warning relating to the use of comet_ml.papi, rather than the newer comet_ml.api.
Example:
`COMET WARNING: You have imported comet_ml.papi; this interface is deprecated. Please use comet_ml.api instead. For more information, see: https://www.comet.ml/docs/python-sdk/releases/#release-300`
</issue>
<code>
[start of pytorch_lightning/logging/comet.py]
1 """
2 Log using `comet <https://www.comet.ml>`_
3
4 Comet logger can be used in either online or offline mode.
5 To log in online mode, CometLogger requries an API key:
6
7 .. code-block:: python
8
9 from pytorch_lightning.logging import CometLogger
10 # arguments made to CometLogger are passed on to the comet_ml.Experiment class
11 comet_logger = CometLogger(
12 api_key=os.environ["COMET_KEY"],
13 workspace=os.environ["COMET_WORKSPACE"], # Optional
14 project_name="default_project", # Optional
15 rest_api_key=os.environ["COMET_REST_KEY"], # Optional
16 experiment_name="default" # Optional
17 )
18 trainer = Trainer(logger=comet_logger)
19
20 To log in offline mode, CometLogger requires a path to a local directory:
21
22 .. code-block:: python
23
24 from pytorch_lightning.logging import CometLogger
25 # arguments made to CometLogger are passed on to the comet_ml.Experiment class
26 comet_logger = CometLogger(
27 save_dir=".",
28 workspace=os.environ["COMET_WORKSPACE"], # Optional
29 project_name="default_project", # Optional
30 rest_api_key=os.environ["COMET_REST_KEY"], # Optional
31 experiment_name="default" # Optional
32 )
33 trainer = Trainer(logger=comet_logger)
34
35
36 Use the logger anywhere in you LightningModule as follows:
37
38 .. code-block:: python
39
40 def train_step(...):
41 # example
42 self.logger.experiment.whatever_comet_ml_supports(...)
43
44 def any_lightning_module_function_or_hook(...):
45 self.logger.experiment.whatever_comet_ml_supports(...)
46
47
48 """
49
50 from logging import getLogger
51
52 try:
53 from comet_ml import Experiment as CometExperiment
54 from comet_ml import OfflineExperiment as CometOfflineExperiment
55 from comet_ml.papi import API
56 except ImportError:
57 raise ImportError('Missing comet_ml package.')
58
59 from torch import is_tensor
60
61 from .base import LightningLoggerBase, rank_zero_only
62 from ..utilities.debugging import MisconfigurationException
63
64 logger = getLogger(__name__)
65
66
67 class CometLogger(LightningLoggerBase):
68 def __init__(self, api_key=None, save_dir=None, workspace=None,
69 rest_api_key=None, project_name=None, experiment_name=None, **kwargs):
70 """Initialize a Comet.ml logger.
71 Requires either an API Key (online mode) or a local directory path (offline mode)
72
73 :param str api_key: Required in online mode. API key, found on Comet.ml
74 :param str save_dir: Required in offline mode. The path for the directory to save local comet logs
75 :param str workspace: Optional. Name of workspace for this user
76 :param str project_name: Optional. Send your experiment to a specific project.
77 Otherwise will be sent to Uncategorized Experiments.
78 If project name does not already exists Comet.ml will create a new project.
79 :param str rest_api_key: Optional. Rest API key found in Comet.ml settings.
80 This is used to determine version number
81 :param str experiment_name: Optional. String representing the name for this particular experiment on Comet.ml
82 """
83 super().__init__()
84 self._experiment = None
85
86 # Determine online or offline mode based on which arguments were passed to CometLogger
87 if save_dir is not None and api_key is not None:
88 # If arguments are passed for both save_dir and api_key, preference is given to online mode
89 self.mode = "online"
90 self.api_key = api_key
91 elif api_key is not None:
92 self.mode = "online"
93 self.api_key = api_key
94 elif save_dir is not None:
95 self.mode = "offline"
96 self.save_dir = save_dir
97 else:
98 # If neither api_key nor save_dir are passed as arguments, raise an exception
99 raise MisconfigurationException("CometLogger requires either api_key or save_dir during initialization.")
100
101 logger.info(f"CometLogger will be initialized in {self.mode} mode")
102
103 self.workspace = workspace
104 self.project_name = project_name
105 self._kwargs = kwargs
106
107 if rest_api_key is not None:
108 # Comet.ml rest API, used to determine version number
109 self.rest_api_key = rest_api_key
110 self.comet_api = API(self.rest_api_key)
111 else:
112 self.rest_api_key = None
113 self.comet_api = None
114
115 if experiment_name:
116 try:
117 self.name = experiment_name
118 except TypeError as e:
119 logger.exception("Failed to set experiment name for comet.ml logger")
120
121 @property
122 def experiment(self):
123 if self._experiment is not None:
124 return self._experiment
125
126 if self.mode == "online":
127 self._experiment = CometExperiment(
128 api_key=self.api_key,
129 workspace=self.workspace,
130 project_name=self.project_name,
131 **self._kwargs
132 )
133 else:
134 self._experiment = CometOfflineExperiment(
135 offline_directory=self.save_dir,
136 workspace=self.workspace,
137 project_name=self.project_name,
138 **self._kwargs
139 )
140
141 return self._experiment
142
143 @rank_zero_only
144 def log_hyperparams(self, params):
145 self.experiment.log_parameters(vars(params))
146
147 @rank_zero_only
148 def log_metrics(self, metrics, step=None):
149 # Comet.ml expects metrics to be a dictionary of detached tensors on CPU
150 for key, val in metrics.items():
151 if is_tensor(val):
152 metrics[key] = val.cpu().detach()
153
154 self.experiment.log_metrics(metrics, step=step)
155
156 @rank_zero_only
157 def finalize(self, status):
158 self.experiment.end()
159
160 @property
161 def name(self):
162 return self.experiment.project_name
163
164 @name.setter
165 def name(self, value):
166 self.experiment.set_name(value)
167
168 @property
169 def version(self):
170 return self.experiment.id
171
[end of pytorch_lightning/logging/comet.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pytorch_lightning/logging/comet.py b/pytorch_lightning/logging/comet.py
--- a/pytorch_lightning/logging/comet.py
+++ b/pytorch_lightning/logging/comet.py
@@ -52,7 +52,11 @@
try:
from comet_ml import Experiment as CometExperiment
from comet_ml import OfflineExperiment as CometOfflineExperiment
- from comet_ml.papi import API
+ try:
+ from comet_ml.api import API
+ except ImportError:
+ # For more information, see: https://www.comet.ml/docs/python-sdk/releases/#release-300
+ from comet_ml.papi import API
except ImportError:
raise ImportError('Missing comet_ml package.')
| {"golden_diff": "diff --git a/pytorch_lightning/logging/comet.py b/pytorch_lightning/logging/comet.py\n--- a/pytorch_lightning/logging/comet.py\n+++ b/pytorch_lightning/logging/comet.py\n@@ -52,7 +52,11 @@\n try:\n from comet_ml import Experiment as CometExperiment\n from comet_ml import OfflineExperiment as CometOfflineExperiment\n- from comet_ml.papi import API\n+ try:\n+ from comet_ml.api import API\n+ except ImportError:\n+ # For more information, see: https://www.comet.ml/docs/python-sdk/releases/#release-300\n+ from comet_ml.papi import API\n except ImportError:\n raise ImportError('Missing comet_ml package.')\n", "issue": "Comet PAPI Depreciated\nUse of the Comet API logger reports an unecessary depreciation warning relating to the use of comet_ml.papi, rather than the newer comet_ml.api.\r\n\r\nExample:\r\n`COMET WARNING: You have imported comet_ml.papi; this interface is deprecated. Please use comet_ml.api instead. For more information, see: https://www.comet.ml/docs/python-sdk/releases/#release-300`\n", "before_files": [{"content": "\"\"\"\nLog using `comet <https://www.comet.ml>`_\n\nComet logger can be used in either online or offline mode.\nTo log in online mode, CometLogger requries an API key:\n\n.. code-block:: python\n\n from pytorch_lightning.logging import CometLogger\n # arguments made to CometLogger are passed on to the comet_ml.Experiment class\n comet_logger = CometLogger(\n api_key=os.environ[\"COMET_KEY\"],\n workspace=os.environ[\"COMET_WORKSPACE\"], # Optional\n project_name=\"default_project\", # Optional\n rest_api_key=os.environ[\"COMET_REST_KEY\"], # Optional\n experiment_name=\"default\" # Optional\n )\n trainer = Trainer(logger=comet_logger)\n\nTo log in offline mode, CometLogger requires a path to a local directory:\n\n.. code-block:: python\n\n from pytorch_lightning.logging import CometLogger\n # arguments made to CometLogger are passed on to the comet_ml.Experiment class\n comet_logger = CometLogger(\n save_dir=\".\",\n workspace=os.environ[\"COMET_WORKSPACE\"], # Optional\n project_name=\"default_project\", # Optional\n rest_api_key=os.environ[\"COMET_REST_KEY\"], # Optional\n experiment_name=\"default\" # Optional\n )\n trainer = Trainer(logger=comet_logger)\n\n\nUse the logger anywhere in you LightningModule as follows:\n\n.. code-block:: python\n\n def train_step(...):\n # example\n self.logger.experiment.whatever_comet_ml_supports(...)\n\n def any_lightning_module_function_or_hook(...):\n self.logger.experiment.whatever_comet_ml_supports(...)\n\n\n\"\"\"\n\nfrom logging import getLogger\n\ntry:\n from comet_ml import Experiment as CometExperiment\n from comet_ml import OfflineExperiment as CometOfflineExperiment\n from comet_ml.papi import API\nexcept ImportError:\n raise ImportError('Missing comet_ml package.')\n\nfrom torch import is_tensor\n\nfrom .base import LightningLoggerBase, rank_zero_only\nfrom ..utilities.debugging import MisconfigurationException\n\nlogger = getLogger(__name__)\n\n\nclass CometLogger(LightningLoggerBase):\n def __init__(self, api_key=None, save_dir=None, workspace=None,\n rest_api_key=None, project_name=None, experiment_name=None, **kwargs):\n \"\"\"Initialize a Comet.ml logger.\n Requires either an API Key (online mode) or a local directory path (offline mode)\n\n :param str api_key: Required in online mode. API key, found on Comet.ml\n :param str save_dir: Required in offline mode. The path for the directory to save local comet logs\n :param str workspace: Optional. 
Name of workspace for this user\n :param str project_name: Optional. Send your experiment to a specific project.\n Otherwise will be sent to Uncategorized Experiments.\n If project name does not already exists Comet.ml will create a new project.\n :param str rest_api_key: Optional. Rest API key found in Comet.ml settings.\n This is used to determine version number\n :param str experiment_name: Optional. String representing the name for this particular experiment on Comet.ml\n \"\"\"\n super().__init__()\n self._experiment = None\n\n # Determine online or offline mode based on which arguments were passed to CometLogger\n if save_dir is not None and api_key is not None:\n # If arguments are passed for both save_dir and api_key, preference is given to online mode\n self.mode = \"online\"\n self.api_key = api_key\n elif api_key is not None:\n self.mode = \"online\"\n self.api_key = api_key\n elif save_dir is not None:\n self.mode = \"offline\"\n self.save_dir = save_dir\n else:\n # If neither api_key nor save_dir are passed as arguments, raise an exception\n raise MisconfigurationException(\"CometLogger requires either api_key or save_dir during initialization.\")\n\n logger.info(f\"CometLogger will be initialized in {self.mode} mode\")\n\n self.workspace = workspace\n self.project_name = project_name\n self._kwargs = kwargs\n\n if rest_api_key is not None:\n # Comet.ml rest API, used to determine version number\n self.rest_api_key = rest_api_key\n self.comet_api = API(self.rest_api_key)\n else:\n self.rest_api_key = None\n self.comet_api = None\n\n if experiment_name:\n try:\n self.name = experiment_name\n except TypeError as e:\n logger.exception(\"Failed to set experiment name for comet.ml logger\")\n\n @property\n def experiment(self):\n if self._experiment is not None:\n return self._experiment\n\n if self.mode == \"online\":\n self._experiment = CometExperiment(\n api_key=self.api_key,\n workspace=self.workspace,\n project_name=self.project_name,\n **self._kwargs\n )\n else:\n self._experiment = CometOfflineExperiment(\n offline_directory=self.save_dir,\n workspace=self.workspace,\n project_name=self.project_name,\n **self._kwargs\n )\n\n return self._experiment\n\n @rank_zero_only\n def log_hyperparams(self, params):\n self.experiment.log_parameters(vars(params))\n\n @rank_zero_only\n def log_metrics(self, metrics, step=None):\n # Comet.ml expects metrics to be a dictionary of detached tensors on CPU\n for key, val in metrics.items():\n if is_tensor(val):\n metrics[key] = val.cpu().detach()\n\n self.experiment.log_metrics(metrics, step=step)\n\n @rank_zero_only\n def finalize(self, status):\n self.experiment.end()\n\n @property\n def name(self):\n return self.experiment.project_name\n\n @name.setter\n def name(self, value):\n self.experiment.set_name(value)\n\n @property\n def version(self):\n return self.experiment.id\n", "path": "pytorch_lightning/logging/comet.py"}]} | 2,294 | 157 |
gh_patches_debug_10854 | rasdani/github-patches | git_diff | liqd__adhocracy4-1528 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
project duplicate functionality with image
**URL:** https://meinberlin-dev.liqd.net/dashboard/projects/fake-project-799/basic/
**user:** moderator/ initiator/admin/
**expected behaviour:** When a project is duplicated, images of the new (duplicated) project are saved correctly
**behaviour:** *Duplicated new project's images are saved with a duplicated file path, e.g instead of `projects/background/image.png`, they are saved as `projects/background/projects/background/image.png`
~**important screensize:** *mobile? desktop?*~
~**device & browser:** *e.g. Desktop, firefox 68.0.2 (64-Bit)*~
**Comment/Question:** *Is this easy to do? What ideas do you have? Would this be a story?*

</issue>
<code>
[start of adhocracy4/dashboard/mixins.py]
1 from copy import deepcopy
2
3 from django.apps import apps
4 from django.conf import settings
5 from django.contrib import messages
6 from django.core.exceptions import PermissionDenied
7 from django.shortcuts import get_object_or_404
8 from django.shortcuts import redirect
9 from django.urls import NoReverseMatch
10 from django.utils import timezone
11 from django.utils.translation import gettext_lazy as _
12 from django.views.generic import base
13 from django.views.generic import edit
14
15 from adhocracy4.projects import models as project_models
16 from adhocracy4.rules import mixins as rules_mixins
17
18 from . import components
19 from . import get_project_dashboard
20 from . import signals
21
22 Organisation = apps.get_model(settings.A4_ORGANISATIONS_MODEL)
23
24
25 class DashboardBaseMixin(rules_mixins.PermissionRequiredMixin):
26 organisation_lookup_field = "slug"
27 organisation_url_kwarg = "organisation_slug"
28
29 @property
30 def organisation(self):
31 if self.organisation_url_kwarg and self.organisation_url_kwarg in self.kwargs:
32 lookup = {
33 self.organisation_lookup_field: self.kwargs[self.organisation_url_kwarg]
34 }
35 return get_object_or_404(Organisation, **lookup)
36
37 return self.project.organisation
38
39 @property
40 def other_organisations_of_user(self):
41 user = self.request.user
42 if self.organisation:
43 initiator_orgs = user.organisation_set.all()
44 if hasattr(Organisation, "groups") and user.groups.all():
45 user_groups = user.groups.all().values_list("id", flat=True)
46 group_orgs = Organisation.objects.filter(groups__in=user_groups)
47 orgs = initiator_orgs | group_orgs
48 return orgs.distinct().exclude(pk=self.organisation.pk)
49 return initiator_orgs.exclude(pk=self.organisation.pk)
50 else:
51 return None
52
53 def get_permission_object(self):
54 raise NotImplementedError("Set permission object.")
55
56 def get_success_url(self):
57 return self.request.path
58
59
60 class BlueprintMixin:
61 @property
62 def blueprint(self):
63 from .blueprints import get_blueprints
64
65 return dict(get_blueprints())[self.blueprint_key]
66
67 @property
68 def blueprint_key(self):
69 return self.kwargs["blueprint_slug"]
70
71
72 class DashboardComponentMixin(base.ContextMixin):
73 """Set the menu_item and add dashboard information to the context data.
74
75 Assumes self.project, self.module and self.component are set.
76 """
77
78 menu_item = "project"
79 component = None
80
81 def get_context_data(self, **kwargs):
82 context = super().get_context_data(**kwargs)
83
84 # Workaround Djangos update behavior:
85 # All fields from the POST data will be set on the view.object model
86 # instance, regardless of validation errors.
87 # Thus it is not reliable to check on empty fields on the view.object
88 # but it has to be ensured that the model reflects the database.
89 project = deepcopy(self.project)
90 if project:
91 project.refresh_from_db()
92
93 dashboard = get_project_dashboard(project)
94
95 context["dashboard_menu"] = dashboard.get_menu(self.module, self.component)
96
97 num_valid, num_required = dashboard.get_progress()
98 project_num_valid, project_num_required = dashboard.get_project_progress()
99 project_is_complete = project_num_valid == project_num_required
100 context["project_progress"] = {
101 "valid": num_valid,
102 "required": num_required,
103 "project_is_complete": project_is_complete,
104 }
105
106 return context
107
108
109 class DashboardComponentFormSignalMixin(edit.FormMixin):
110 def form_valid(self, form):
111 response = super().form_valid(form)
112
113 component = self.component
114 if component.identifier in components.projects:
115 signals.project_component_updated.send(
116 sender=component.__class__,
117 project=self.project,
118 component=component,
119 user=self.request.user,
120 )
121 else:
122 signals.module_component_updated.send(
123 sender=component.__class__,
124 module=self.module,
125 component=component,
126 user=self.request.user,
127 )
128 return response
129
130
131 class DashboardComponentDeleteSignalMixin(edit.DeletionMixin):
132 def delete(self, request, *args, **kwargs):
133 # Project and module have to be stored before delete is called as
134 # they may rely on the still existing db object.
135 project = self.project
136 module = self.module
137
138 response = super().delete(request, *args, **kwargs)
139
140 component = self.component
141 if component.identifier in components.projects:
142 signals.project_component_updated.send(
143 sender=component.__class__,
144 project=project,
145 component=component,
146 user=self.request.user,
147 )
148 else:
149 signals.module_component_updated.send(
150 sender=component.__class__,
151 module=module,
152 component=component,
153 user=self.request.user,
154 )
155 return response
156
157
158 class DashboardProjectDuplicateMixin:
159 def post(self, request, *args, **kwargs):
160 if "duplicate" in request.POST:
161 pk = int(request.POST["project_pk"])
162 project = get_object_or_404(project_models.Project, pk=pk)
163 can_add = request.user.has_perm("a4projects.add_project", project)
164
165 if not can_add:
166 raise PermissionDenied()
167
168 project_clone = deepcopy(project)
169 project_clone.pk = None
170 if project_clone.tile_image:
171 project_clone.tile_image.save(
172 project.tile_image.name, project.tile_image, False
173 )
174 if project_clone.image:
175 project_clone.image.save(project.image.name, project.image, False)
176 project_clone.created = timezone.now()
177 project_clone.is_draft = True
178 project_clone.is_archived = False
179 project_clone.save()
180 signals.project_created.send(
181 sender=None, project=project_clone, user=self.request.user
182 )
183
184 for moderator in project.moderators.all():
185 project_clone.moderators.add(moderator)
186
187 for module in project.module_set.all():
188 module_clone = deepcopy(module)
189 module_clone.project = project_clone
190 module_clone.pk = None
191 module_clone.save()
192 signals.module_created.send(
193 sender=None, module=module_clone, user=self.request.user
194 )
195
196 for phase in module.phase_set.all():
197 phase_clone = deepcopy(phase)
198 phase_clone.module = module_clone
199 phase_clone.pk = None
200 phase_clone.save()
201
202 settings_instance = module.settings_instance
203 if settings_instance:
204 settings_instance_clone = deepcopy(settings_instance)
205 settings_instance_clone.pk = None
206 settings_instance_clone.module = module_clone
207 settings_instance_clone.save()
208
209 messages.success(request, _("Project successfully duplicated."))
210
211 try:
212 org_slug = project_clone.organisation.slug
213 return redirect(
214 "a4dashboard:project-edit",
215 organisation_slug=org_slug,
216 project_slug=project_clone.slug,
217 )
218 except NoReverseMatch:
219 return redirect(
220 "a4dashboard:project-edit", project_slug=project_clone.slug
221 )
222 else:
223 return super().post(request, *args, **kwargs)
224
[end of adhocracy4/dashboard/mixins.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/adhocracy4/dashboard/mixins.py b/adhocracy4/dashboard/mixins.py
--- a/adhocracy4/dashboard/mixins.py
+++ b/adhocracy4/dashboard/mixins.py
@@ -167,12 +167,6 @@
project_clone = deepcopy(project)
project_clone.pk = None
- if project_clone.tile_image:
- project_clone.tile_image.save(
- project.tile_image.name, project.tile_image, False
- )
- if project_clone.image:
- project_clone.image.save(project.image.name, project.image, False)
project_clone.created = timezone.now()
project_clone.is_draft = True
project_clone.is_archived = False
| {"golden_diff": "diff --git a/adhocracy4/dashboard/mixins.py b/adhocracy4/dashboard/mixins.py\n--- a/adhocracy4/dashboard/mixins.py\n+++ b/adhocracy4/dashboard/mixins.py\n@@ -167,12 +167,6 @@\n \n project_clone = deepcopy(project)\n project_clone.pk = None\n- if project_clone.tile_image:\n- project_clone.tile_image.save(\n- project.tile_image.name, project.tile_image, False\n- )\n- if project_clone.image:\n- project_clone.image.save(project.image.name, project.image, False)\n project_clone.created = timezone.now()\n project_clone.is_draft = True\n project_clone.is_archived = False\n", "issue": "project duplicate functionality with image\n**URL:** https://meinberlin-dev.liqd.net/dashboard/projects/fake-project-799/basic/\r\n**user:** moderator/ initiator/admin/\r\n**expected behaviour:** When a project is duplicated, images of the new (duplicated) project are saved correctly\r\n**behaviour:** *Duplicated new project's images are saved with a duplicated file path, e.g instead of `projects/background/image.png`, they are saved as `projects/background/projects/background/image.png`\r\n~**important screensize:** *mobile? desktop?*~\r\n~**device & browser:** *e.g. Desktop, firefox 68.0.2 (64-Bit)*~\r\n**Comment/Question:** *Is this easy to do? What ideas do you have? Would this be a story?* \r\n\r\n\r\n\n", "before_files": [{"content": "from copy import deepcopy\n\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import get_object_or_404\nfrom django.shortcuts import redirect\nfrom django.urls import NoReverseMatch\nfrom django.utils import timezone\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.generic import base\nfrom django.views.generic import edit\n\nfrom adhocracy4.projects import models as project_models\nfrom adhocracy4.rules import mixins as rules_mixins\n\nfrom . import components\nfrom . import get_project_dashboard\nfrom . 
import signals\n\nOrganisation = apps.get_model(settings.A4_ORGANISATIONS_MODEL)\n\n\nclass DashboardBaseMixin(rules_mixins.PermissionRequiredMixin):\n organisation_lookup_field = \"slug\"\n organisation_url_kwarg = \"organisation_slug\"\n\n @property\n def organisation(self):\n if self.organisation_url_kwarg and self.organisation_url_kwarg in self.kwargs:\n lookup = {\n self.organisation_lookup_field: self.kwargs[self.organisation_url_kwarg]\n }\n return get_object_or_404(Organisation, **lookup)\n\n return self.project.organisation\n\n @property\n def other_organisations_of_user(self):\n user = self.request.user\n if self.organisation:\n initiator_orgs = user.organisation_set.all()\n if hasattr(Organisation, \"groups\") and user.groups.all():\n user_groups = user.groups.all().values_list(\"id\", flat=True)\n group_orgs = Organisation.objects.filter(groups__in=user_groups)\n orgs = initiator_orgs | group_orgs\n return orgs.distinct().exclude(pk=self.organisation.pk)\n return initiator_orgs.exclude(pk=self.organisation.pk)\n else:\n return None\n\n def get_permission_object(self):\n raise NotImplementedError(\"Set permission object.\")\n\n def get_success_url(self):\n return self.request.path\n\n\nclass BlueprintMixin:\n @property\n def blueprint(self):\n from .blueprints import get_blueprints\n\n return dict(get_blueprints())[self.blueprint_key]\n\n @property\n def blueprint_key(self):\n return self.kwargs[\"blueprint_slug\"]\n\n\nclass DashboardComponentMixin(base.ContextMixin):\n \"\"\"Set the menu_item and add dashboard information to the context data.\n\n Assumes self.project, self.module and self.component are set.\n \"\"\"\n\n menu_item = \"project\"\n component = None\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n # Workaround Djangos update behavior:\n # All fields from the POST data will be set on the view.object model\n # instance, regardless of validation errors.\n # Thus it is not reliable to check on empty fields on the view.object\n # but it has to be ensured that the model reflects the database.\n project = deepcopy(self.project)\n if project:\n project.refresh_from_db()\n\n dashboard = get_project_dashboard(project)\n\n context[\"dashboard_menu\"] = dashboard.get_menu(self.module, self.component)\n\n num_valid, num_required = dashboard.get_progress()\n project_num_valid, project_num_required = dashboard.get_project_progress()\n project_is_complete = project_num_valid == project_num_required\n context[\"project_progress\"] = {\n \"valid\": num_valid,\n \"required\": num_required,\n \"project_is_complete\": project_is_complete,\n }\n\n return context\n\n\nclass DashboardComponentFormSignalMixin(edit.FormMixin):\n def form_valid(self, form):\n response = super().form_valid(form)\n\n component = self.component\n if component.identifier in components.projects:\n signals.project_component_updated.send(\n sender=component.__class__,\n project=self.project,\n component=component,\n user=self.request.user,\n )\n else:\n signals.module_component_updated.send(\n sender=component.__class__,\n module=self.module,\n component=component,\n user=self.request.user,\n )\n return response\n\n\nclass DashboardComponentDeleteSignalMixin(edit.DeletionMixin):\n def delete(self, request, *args, **kwargs):\n # Project and module have to be stored before delete is called as\n # they may rely on the still existing db object.\n project = self.project\n module = self.module\n\n response = super().delete(request, *args, **kwargs)\n\n component = 
self.component\n if component.identifier in components.projects:\n signals.project_component_updated.send(\n sender=component.__class__,\n project=project,\n component=component,\n user=self.request.user,\n )\n else:\n signals.module_component_updated.send(\n sender=component.__class__,\n module=module,\n component=component,\n user=self.request.user,\n )\n return response\n\n\nclass DashboardProjectDuplicateMixin:\n def post(self, request, *args, **kwargs):\n if \"duplicate\" in request.POST:\n pk = int(request.POST[\"project_pk\"])\n project = get_object_or_404(project_models.Project, pk=pk)\n can_add = request.user.has_perm(\"a4projects.add_project\", project)\n\n if not can_add:\n raise PermissionDenied()\n\n project_clone = deepcopy(project)\n project_clone.pk = None\n if project_clone.tile_image:\n project_clone.tile_image.save(\n project.tile_image.name, project.tile_image, False\n )\n if project_clone.image:\n project_clone.image.save(project.image.name, project.image, False)\n project_clone.created = timezone.now()\n project_clone.is_draft = True\n project_clone.is_archived = False\n project_clone.save()\n signals.project_created.send(\n sender=None, project=project_clone, user=self.request.user\n )\n\n for moderator in project.moderators.all():\n project_clone.moderators.add(moderator)\n\n for module in project.module_set.all():\n module_clone = deepcopy(module)\n module_clone.project = project_clone\n module_clone.pk = None\n module_clone.save()\n signals.module_created.send(\n sender=None, module=module_clone, user=self.request.user\n )\n\n for phase in module.phase_set.all():\n phase_clone = deepcopy(phase)\n phase_clone.module = module_clone\n phase_clone.pk = None\n phase_clone.save()\n\n settings_instance = module.settings_instance\n if settings_instance:\n settings_instance_clone = deepcopy(settings_instance)\n settings_instance_clone.pk = None\n settings_instance_clone.module = module_clone\n settings_instance_clone.save()\n\n messages.success(request, _(\"Project successfully duplicated.\"))\n\n try:\n org_slug = project_clone.organisation.slug\n return redirect(\n \"a4dashboard:project-edit\",\n organisation_slug=org_slug,\n project_slug=project_clone.slug,\n )\n except NoReverseMatch:\n return redirect(\n \"a4dashboard:project-edit\", project_slug=project_clone.slug\n )\n else:\n return super().post(request, *args, **kwargs)\n", "path": "adhocracy4/dashboard/mixins.py"}]} | 2,814 | 157 |
gh_patches_debug_28626 | rasdani/github-patches | git_diff | pymodbus-dev__pymodbus-1053 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Updating Server Example does not work
<!--
Please use the Pymodbus gitter channel at https://gitter.im/pymodbus_dev/Lobby or Stack Overflow(tag [pymodbus](https://stackoverflow.com/questions/tagged/pymodbus) for
support questions.
Before opening a new issue, make sure you do the following:
* check that your issue isn't already filed: https://github.com/riptideio/pymodbus/issues
* check the discussions forum https://github.com/riptideio/pymodbus/discussions
 * prepare a short, runnable example that reproduces the issue with the latest development version of Pymodbus
-->
### Versions
 * Python: 3.7, 3.8, 3.10
* OS: ubuntu bionic
* Pymodbus: 2.5.3
* Modbus Hardware (if used):
### Pymodbus Specific
* Server: tcp - sync/async
### Description
I try to run the example; the first error is
`TypeError: ModbusDeviceIdentification.__init__() got an unexpected keyword argument 'info_name'`
when I remove that info, it ends with
```
modbusupdater.py", line 81, in run_updating_server
loop.start(time, now=False) # initially delay by time
AttributeError: '_UnixSelectorEventLoop' object has no attribute 'start'
```
The async and sync servers have the same error with info_name, but run when it is removed. However, I am unable to run the updating server example.
Thank you
</issue>
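For orientation before the code listing: the `AttributeError` in the traceback above comes from calling what looks like a Twisted `LoopingCall` pattern (`loop.start(interval, now=False)`) on an asyncio event loop, which has no such method. The accepted patch shown further down in this record instead turns `run_updating_server()` into a coroutine started with `asyncio.run()`. The sketch below is only a generic asyncio way to get the periodic update the example intended; the names and the fake context are stand-ins, not the repository's fix.

```python
# Minimal sketch: periodic datastore updates with asyncio instead of a
# Twisted-style LoopingCall. updating_writer/context are stand-ins here.
import asyncio


def updating_writer(extra):
    print("would update the Modbus datastore here", extra)


async def periodic(interval: float, extra) -> None:
    while True:
        await asyncio.sleep(interval)      # behaves like now=False (initial delay)
        updating_writer(extra)


async def main() -> None:
    context = {"fake": "context"}          # stand-in for ModbusServerContext
    updater = asyncio.create_task(periodic(5, (context,)))
    try:
        await asyncio.sleep(12)            # stand-in for the running server
    finally:
        updater.cancel()


if __name__ == "__main__":
    asyncio.run(main())
```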
<code>
[start of examples/common/updating_server.py]
1 #!/usr/bin/env python3
2 # pylint: disable=missing-any-param-doc,differing-param-doc
3 """Pymodbus Server With Updating Thread.
4
5 This is an example of having a background thread updating the
6 context while the server is operating. This can also be done with
7 a python thread::
8
9 from threading import Thread
10 Thread(target=updating_writer, args=(context,)).start()
11 """
12 import logging
13 import asyncio
14
15 from pymodbus.datastore import (
16 ModbusSequentialDataBlock,
17 ModbusServerContext,
18 ModbusSlaveContext,
19 )
20 from pymodbus.device import ModbusDeviceIdentification
21 from pymodbus.server.async_io import StartTcpServer
22 from pymodbus.version import version
23
24 # --------------------------------------------------------------------------- #
25 # configure the service logging
26 # --------------------------------------------------------------------------- #
27 log = logging.getLogger()
28 log.setLevel(logging.DEBUG)
29
30 # --------------------------------------------------------------------------- #
31 # define your callback process
32 # --------------------------------------------------------------------------- #
33
34
35 def updating_writer(extra):
36 """Run every so often,
37
38 and updates live values of the context. It should be noted
39 that there is a race condition for the update.
40
41 :param arguments: The input arguments to the call
42 """
43 log.debug("updating the context")
44 context = extra[0]
45 register = 3
46 slave_id = 0x00
47 address = 0x10
48 values = context[slave_id].getValues(register, address, count=5)
49 values = [v + 1 for v in values]
50 txt = f"new values: {str(values)}"
51 log.debug(txt)
52 context[slave_id].setValues(register, address, values)
53
54
55 def run_updating_server():
56 """Run updating server."""
57 # ----------------------------------------------------------------------- #
58 # initialize your data store
59 # ----------------------------------------------------------------------- #
60
61 store = ModbusSlaveContext(
62 di=ModbusSequentialDataBlock(0, [17] * 100),
63 co=ModbusSequentialDataBlock(0, [17] * 100),
64 hr=ModbusSequentialDataBlock(0, [17] * 100),
65 ir=ModbusSequentialDataBlock(0, [17] * 100),
66 )
67 context = ModbusServerContext(slaves=store, single=True)
68
69 # ----------------------------------------------------------------------- #
70 # initialize the server information
71 # ----------------------------------------------------------------------- #
72 identity = ModbusDeviceIdentification(
73 info_name={
74 "VendorName": "pymodbus",
75 "ProductCode": "PM",
76 "VendorUrl": "https://github.com/riptideio/pymodbus/",
77 "ProductName": "pymodbus Server",
78 "ModelName": "pymodbus Server",
79 "MajorMinorRevision": version.short(),
80 }
81 )
82
83 # ----------------------------------------------------------------------- #
84 # run the server you want
85 # ----------------------------------------------------------------------- #
86 time = 5 # 5 seconds delay
87 loop = asyncio.get_event_loop()
88 loop.start(time, now=False) # initially delay by time
89 StartTcpServer(context, identity=identity, address=("localhost", 5020))
90
91
92 if __name__ == "__main__":
93 run_updating_server()
94
[end of examples/common/updating_server.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/common/updating_server.py b/examples/common/updating_server.py
--- a/examples/common/updating_server.py
+++ b/examples/common/updating_server.py
@@ -36,7 +36,7 @@
"""Run every so often,
and updates live values of the context. It should be noted
- that there is a race condition for the update.
+ that there is a lrace condition for the update.
:param arguments: The input arguments to the call
"""
@@ -52,7 +52,7 @@
context[slave_id].setValues(register, address, values)
-def run_updating_server():
+async def run_updating_server():
"""Run updating server."""
# ----------------------------------------------------------------------- #
# initialize your data store
@@ -83,11 +83,15 @@
# ----------------------------------------------------------------------- #
# run the server you want
# ----------------------------------------------------------------------- #
- time = 5 # 5 seconds delay
- loop = asyncio.get_event_loop()
- loop.start(time, now=False) # initially delay by time
- StartTcpServer(context, identity=identity, address=("localhost", 5020))
+ log.debug("Start server")
+ await StartTcpServer(
+ context,
+ identity=identity,
+ address=("localhost", 5020),
+ defer_start=False
+ )
+ log.debug("Done")
if __name__ == "__main__":
- run_updating_server()
+ asyncio.run(run_updating_server())
| {"golden_diff": "diff --git a/examples/common/updating_server.py b/examples/common/updating_server.py\n--- a/examples/common/updating_server.py\n+++ b/examples/common/updating_server.py\n@@ -36,7 +36,7 @@\n \"\"\"Run every so often,\n \n and updates live values of the context. It should be noted\n- that there is a race condition for the update.\n+ that there is a lrace condition for the update.\n \n :param arguments: The input arguments to the call\n \"\"\"\n@@ -52,7 +52,7 @@\n context[slave_id].setValues(register, address, values)\n \n \n-def run_updating_server():\n+async def run_updating_server():\n \"\"\"Run updating server.\"\"\"\n # ----------------------------------------------------------------------- #\n # initialize your data store\n@@ -83,11 +83,15 @@\n # ----------------------------------------------------------------------- #\n # run the server you want\n # ----------------------------------------------------------------------- #\n- time = 5 # 5 seconds delay\n- loop = asyncio.get_event_loop()\n- loop.start(time, now=False) # initially delay by time\n- StartTcpServer(context, identity=identity, address=(\"localhost\", 5020))\n+ log.debug(\"Start server\")\n+ await StartTcpServer(\n+ context,\n+ identity=identity,\n+ address=(\"localhost\", 5020),\n+ defer_start=False\n+ )\n+ log.debug(\"Done\")\n \n \n if __name__ == \"__main__\":\n- run_updating_server()\n+ asyncio.run(run_updating_server())\n", "issue": "Updating Server Example does not work\n<!--\r\nPlease use the Pymodbus gitter channel at https://gitter.im/pymodbus_dev/Lobby or Stack Overflow(tag [pymodbus](https://stackoverflow.com/questions/tagged/pymodbus) for\r\nsupport questions.\r\n\r\nBefore opening a new issue, make sure you do the following:\r\n * check that your issue isn't already filed: https://github.com/riptideio/pymodbus/issues\r\n * check the discussions forum https://github.com/riptideio/pymodbus/discussions\r\n * prepare a short, runnable example that reproduce the issue with the latest development version of Pymodbus\r\n-->\r\n\r\n### Versions\r\n\r\n* Python: 3.7, 3.8, 3,10\r\n* OS: ubuntu bionic\r\n* Pymodbus: 2.5.3\r\n* Modbus Hardware (if used): \r\n\r\n### Pymodbus Specific\r\n* Server: tcp - sync/async\r\n\r\n\r\n### Description\r\n\r\nI try to run example, first error is\r\n`TypeError: ModbusDeviceIdentification.__init__() got an unexpected keyword argument 'info_name'`\r\n\r\nwhen i remove that info, it ends on\r\n\r\n```\r\nmodbusupdater.py\", line 81, in run_updating_server\r\n loop.start(time, now=False) # initially delay by time\r\nAttributeError: '_UnixSelectorEventLoop' object has no attribute 'start'\r\n```\r\n\r\nasync/sync server have same error with info_name, but run when removed. But i am unable to run updating server example.\r\n\r\nThank you\n", "before_files": [{"content": "#!/usr/bin/env python3\n# pylint: disable=missing-any-param-doc,differing-param-doc\n\"\"\"Pymodbus Server With Updating Thread.\n\nThis is an example of having a background thread updating the\ncontext while the server is operating. 
This can also be done with\na python thread::\n\n from threading import Thread\n Thread(target=updating_writer, args=(context,)).start()\n\"\"\"\nimport logging\nimport asyncio\n\nfrom pymodbus.datastore import (\n ModbusSequentialDataBlock,\n ModbusServerContext,\n ModbusSlaveContext,\n)\nfrom pymodbus.device import ModbusDeviceIdentification\nfrom pymodbus.server.async_io import StartTcpServer\nfrom pymodbus.version import version\n\n# --------------------------------------------------------------------------- #\n# configure the service logging\n# --------------------------------------------------------------------------- #\nlog = logging.getLogger()\nlog.setLevel(logging.DEBUG)\n\n# --------------------------------------------------------------------------- #\n# define your callback process\n# --------------------------------------------------------------------------- #\n\n\ndef updating_writer(extra):\n \"\"\"Run every so often,\n\n and updates live values of the context. It should be noted\n that there is a race condition for the update.\n\n :param arguments: The input arguments to the call\n \"\"\"\n log.debug(\"updating the context\")\n context = extra[0]\n register = 3\n slave_id = 0x00\n address = 0x10\n values = context[slave_id].getValues(register, address, count=5)\n values = [v + 1 for v in values]\n txt = f\"new values: {str(values)}\"\n log.debug(txt)\n context[slave_id].setValues(register, address, values)\n\n\ndef run_updating_server():\n \"\"\"Run updating server.\"\"\"\n # ----------------------------------------------------------------------- #\n # initialize your data store\n # ----------------------------------------------------------------------- #\n\n store = ModbusSlaveContext(\n di=ModbusSequentialDataBlock(0, [17] * 100),\n co=ModbusSequentialDataBlock(0, [17] * 100),\n hr=ModbusSequentialDataBlock(0, [17] * 100),\n ir=ModbusSequentialDataBlock(0, [17] * 100),\n )\n context = ModbusServerContext(slaves=store, single=True)\n\n # ----------------------------------------------------------------------- #\n # initialize the server information\n # ----------------------------------------------------------------------- #\n identity = ModbusDeviceIdentification(\n info_name={\n \"VendorName\": \"pymodbus\",\n \"ProductCode\": \"PM\",\n \"VendorUrl\": \"https://github.com/riptideio/pymodbus/\",\n \"ProductName\": \"pymodbus Server\",\n \"ModelName\": \"pymodbus Server\",\n \"MajorMinorRevision\": version.short(),\n }\n )\n\n # ----------------------------------------------------------------------- #\n # run the server you want\n # ----------------------------------------------------------------------- #\n time = 5 # 5 seconds delay\n loop = asyncio.get_event_loop()\n loop.start(time, now=False) # initially delay by time\n StartTcpServer(context, identity=identity, address=(\"localhost\", 5020))\n\n\nif __name__ == \"__main__\":\n run_updating_server()\n", "path": "examples/common/updating_server.py"}]} | 1,735 | 338 |
gh_patches_debug_30500 | rasdani/github-patches | git_diff | Lightning-AI__torchmetrics-1943 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CLIPScore demo code is incorrect
Page: [CLIPScore](https://torchmetrics.readthedocs.io/en/stable/multimodal/clip_score.html)
The demo imports CLIPScore as:
```python
from torchmetrics.multimodal import CLIPScore
```
But the correct import is:
```python
from torchmetrics.multimodal.clip_score import CLIPScore
```
</issue>
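A quick way to check the corrected import path, mirroring the usage already shown in the class docstring below; it assumes `torchmetrics` is installed with the `transformers` dependency and that the CLIP weights can be downloaded.

```python
# Sanity check for the corrected import path (sketch; requires transformers
# and a network connection to fetch the CLIP weights).
import torch
from torchmetrics.multimodal.clip_score import CLIPScore

metric = CLIPScore(model_name_or_path="openai/clip-vit-base-patch16")
score = metric(torch.randint(255, (3, 224, 224)), "a photo of a cat")
print(score.detach())
```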
<code>
[start of src/torchmetrics/multimodal/clip_score.py]
1 # Copyright The Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import os
15 from typing import Any, List, Optional, Sequence, Union
16
17 import torch
18 from torch import Tensor
19 from typing_extensions import Literal
20
21 from torchmetrics import Metric
22 from torchmetrics.functional.multimodal.clip_score import _clip_score_update, _get_model_and_processor
23 from torchmetrics.utilities.checks import _SKIP_SLOW_DOCTEST, _try_proceed_with_timeout
24 from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE, _PESQ_AVAILABLE, _TRANSFORMERS_AVAILABLE
25 from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
26
27 if not _MATPLOTLIB_AVAILABLE:
28 __doctest_skip__ = ["CLIPScore.plot"]
29
30 _DEFAULT_MODEL: str = "openai/clip-vit-large-patch14"
31
32 if _TRANSFORMERS_AVAILABLE:
33 from transformers import CLIPModel as _CLIPModel
34 from transformers import CLIPProcessor as _CLIPProcessor
35
36 def _download_clip() -> None:
37 _CLIPModel.from_pretrained(_DEFAULT_MODEL)
38 _CLIPProcessor.from_pretrained(_DEFAULT_MODEL)
39
40 if _SKIP_SLOW_DOCTEST and not _try_proceed_with_timeout(_download_clip):
41 __doctest_skip__ = ["CLIPScore", "CLIPScore.plot"]
42 else:
43 __doctest_skip__ = ["CLIPScore", "CLIPScore.plot"]
44
45
46 class CLIPScore(Metric):
47 r"""Calculates `CLIP Score`_ which is a text-to-image similarity metric.
48
49 CLIP is a reference free metric that can be used to evaluate the correlation between a generated caption for an
50 image and the actual content of the image. It has been found to be highly correlated with human judgement. The
51 metric is defined as:
52
53 .. math::
54 \text{CLIPScore(I, C)} = max(100 * cos(E_I, E_C), 0)
55
56 which corresponds to the cosine similarity between visual CLIP embedding :math:`E_i` for an image :math:`i` and
57 textual CLIP embedding :math:`E_C` for an caption :math:`C`. The score is bound between 0 and 100 and the closer
58 to 100 the better.
59
60 .. note:: Metric is not scriptable
61
62 Args:
63 model_name_or_path: string indicating the version of the CLIP model to use. Available models are:
64
65 - `"openai/clip-vit-base-patch16"`
66 - `"openai/clip-vit-base-patch32"`
67 - `"openai/clip-vit-large-patch14-336"`
68 - `"openai/clip-vit-large-patch14"`
69
70 kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
71
72 Raises:
73 ModuleNotFoundError:
74 If transformers package is not installed or version is lower than 4.10.0
75
76 Example:
77 >>> import torch
78 >>> _ = torch.manual_seed(42)
79 >>> from torchmetrics.multimodal import CLIPScore
80 >>> metric = CLIPScore(model_name_or_path="openai/clip-vit-base-patch16")
81 >>> score = metric(torch.randint(255, (3, 224, 224)), "a photo of a cat")
82 >>> print(score.detach())
83 tensor(24.7691)
84
85 """
86
87 is_differentiable: bool = False
88 higher_is_better: bool = True
89 full_state_update: bool = True
90 plot_lower_bound: float = 0.0
91
92 score: Tensor
93 n_samples: Tensor
94 plot_upper_bound = 100.0
95
96 def __init__(
97 self,
98 model_name_or_path: Literal[
99 "openai/clip-vit-base-patch16",
100 "openai/clip-vit-base-patch32",
101 "openai/clip-vit-large-patch14-336",
102 "openai/clip-vit-large-patch14",
103 ] = _DEFAULT_MODEL, # type: ignore[assignment]
104 **kwargs: Any,
105 ) -> None:
106 super().__init__(**kwargs)
107 self.model, self.processor = _get_model_and_processor(model_name_or_path)
108 self.add_state("score", torch.tensor(0.0), dist_reduce_fx="sum")
109 self.add_state("n_samples", torch.tensor(0, dtype=torch.long), dist_reduce_fx="sum")
110
111 def update(self, images: Union[Tensor, List[Tensor]], text: Union[str, List[str]]) -> None:
112 """Update CLIP score on a batch of images and text.
113
114 Args:
115 images: Either a single [N, C, H, W] tensor or a list of [C, H, W] tensors
116 text: Either a single caption or a list of captions
117
118 Raises:
119 ValueError:
120 If not all images have format [C, H, W]
121 ValueError:
122 If the number of images and captions do not match
123
124 """
125 score, n_samples = _clip_score_update(images, text, self.model, self.processor)
126 self.score += score.sum(0)
127 self.n_samples += n_samples
128
129 def compute(self) -> Tensor:
130 """Compute accumulated clip score."""
131 return torch.max(self.score / self.n_samples, torch.zeros_like(self.score))
132
133 def plot(self, val: Union[Tensor, Sequence[Tensor], None] = None, ax: Optional[_AX_TYPE] = None) -> _PLOT_OUT_TYPE:
134 """Plot a single or multiple values from the metric.
135
136 Args:
137 val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
138 If no value is provided, will automatically call `metric.compute` and plot that result.
139 ax: An matplotlib axis object. If provided will add plot to that axis
140
141 Returns:
142 Figure and Axes object
143
144 Raises:
145 ModuleNotFoundError:
146 If `matplotlib` is not installed
147
148 .. plot::
149 :scale: 75
150
151 >>> # Example plotting a single value
152 >>> import torch
153 >>> from torchmetrics.multimodal import CLIPScore
154 >>> metric = CLIPScore(model_name_or_path="openai/clip-vit-base-patch16")
155 >>> metric.update(torch.randint(255, (3, 224, 224)), "a photo of a cat")
156 >>> fig_, ax_ = metric.plot()
157
158 .. plot::
159 :scale: 75
160
161 >>> # Example plotting multiple values
162 >>> import torch
163 >>> from torchmetrics.multimodal import CLIPScore
164 >>> metric = CLIPScore(model_name_or_path="openai/clip-vit-base-patch16")
165 >>> values = [ ]
166 >>> for _ in range(10):
167 ... values.append(metric(torch.randint(255, (3, 224, 224)), "a photo of a cat"))
168 >>> fig_, ax_ = metric.plot(values)
169
170 """
171 return self._plot(val, ax)
172
[end of src/torchmetrics/multimodal/clip_score.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/torchmetrics/multimodal/clip_score.py b/src/torchmetrics/multimodal/clip_score.py
--- a/src/torchmetrics/multimodal/clip_score.py
+++ b/src/torchmetrics/multimodal/clip_score.py
@@ -76,7 +76,7 @@
Example:
>>> import torch
>>> _ = torch.manual_seed(42)
- >>> from torchmetrics.multimodal import CLIPScore
+ >>> from torchmetrics.multimodal.clip_score import CLIPScore
>>> metric = CLIPScore(model_name_or_path="openai/clip-vit-base-patch16")
>>> score = metric(torch.randint(255, (3, 224, 224)), "a photo of a cat")
>>> print(score.detach())
@@ -150,7 +150,7 @@
>>> # Example plotting a single value
>>> import torch
- >>> from torchmetrics.multimodal import CLIPScore
+ >>> from torchmetrics.multimodal.clip_score import CLIPScore
>>> metric = CLIPScore(model_name_or_path="openai/clip-vit-base-patch16")
>>> metric.update(torch.randint(255, (3, 224, 224)), "a photo of a cat")
>>> fig_, ax_ = metric.plot()
@@ -160,7 +160,7 @@
>>> # Example plotting multiple values
>>> import torch
- >>> from torchmetrics.multimodal import CLIPScore
+ >>> from torchmetrics.multimodal.clip_score import CLIPScore
>>> metric = CLIPScore(model_name_or_path="openai/clip-vit-base-patch16")
>>> values = [ ]
>>> for _ in range(10):
| {"golden_diff": "diff --git a/src/torchmetrics/multimodal/clip_score.py b/src/torchmetrics/multimodal/clip_score.py\n--- a/src/torchmetrics/multimodal/clip_score.py\n+++ b/src/torchmetrics/multimodal/clip_score.py\n@@ -76,7 +76,7 @@\n Example:\n >>> import torch\n >>> _ = torch.manual_seed(42)\n- >>> from torchmetrics.multimodal import CLIPScore\n+ >>> from torchmetrics.multimodal.clip_score import CLIPScore\n >>> metric = CLIPScore(model_name_or_path=\"openai/clip-vit-base-patch16\")\n >>> score = metric(torch.randint(255, (3, 224, 224)), \"a photo of a cat\")\n >>> print(score.detach())\n@@ -150,7 +150,7 @@\n \n >>> # Example plotting a single value\n >>> import torch\n- >>> from torchmetrics.multimodal import CLIPScore\n+ >>> from torchmetrics.multimodal.clip_score import CLIPScore\n >>> metric = CLIPScore(model_name_or_path=\"openai/clip-vit-base-patch16\")\n >>> metric.update(torch.randint(255, (3, 224, 224)), \"a photo of a cat\")\n >>> fig_, ax_ = metric.plot()\n@@ -160,7 +160,7 @@\n \n >>> # Example plotting multiple values\n >>> import torch\n- >>> from torchmetrics.multimodal import CLIPScore\n+ >>> from torchmetrics.multimodal.clip_score import CLIPScore\n >>> metric = CLIPScore(model_name_or_path=\"openai/clip-vit-base-patch16\")\n >>> values = [ ]\n >>> for _ in range(10):\n", "issue": "CLIPScore demo code is incorrect\n\r\nPage: [CLIPScore](https://torchmetrics.readthedocs.io/en/stable/multimodal/clip_score.html)\r\n\r\nDemo import CLIPScore as:\r\n```python\r\nfrom torchmetrics.multimodal import CLIPScore\r\n```\r\n\r\nBut the correct import is: \r\n```python\r\nfrom torchmetrics.multimodal.clip_score import CLIPScore\r\n```\n", "before_files": [{"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nfrom typing import Any, List, Optional, Sequence, Union\n\nimport torch\nfrom torch import Tensor\nfrom typing_extensions import Literal\n\nfrom torchmetrics import Metric\nfrom torchmetrics.functional.multimodal.clip_score import _clip_score_update, _get_model_and_processor\nfrom torchmetrics.utilities.checks import _SKIP_SLOW_DOCTEST, _try_proceed_with_timeout\nfrom torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE, _PESQ_AVAILABLE, _TRANSFORMERS_AVAILABLE\nfrom torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE\n\nif not _MATPLOTLIB_AVAILABLE:\n __doctest_skip__ = [\"CLIPScore.plot\"]\n\n_DEFAULT_MODEL: str = \"openai/clip-vit-large-patch14\"\n\nif _TRANSFORMERS_AVAILABLE:\n from transformers import CLIPModel as _CLIPModel\n from transformers import CLIPProcessor as _CLIPProcessor\n\n def _download_clip() -> None:\n _CLIPModel.from_pretrained(_DEFAULT_MODEL)\n _CLIPProcessor.from_pretrained(_DEFAULT_MODEL)\n\n if _SKIP_SLOW_DOCTEST and not _try_proceed_with_timeout(_download_clip):\n __doctest_skip__ = [\"CLIPScore\", \"CLIPScore.plot\"]\nelse:\n __doctest_skip__ = [\"CLIPScore\", \"CLIPScore.plot\"]\n\n\nclass CLIPScore(Metric):\n r\"\"\"Calculates `CLIP Score`_ which is a text-to-image similarity 
metric.\n\n CLIP is a reference free metric that can be used to evaluate the correlation between a generated caption for an\n image and the actual content of the image. It has been found to be highly correlated with human judgement. The\n metric is defined as:\n\n .. math::\n \\text{CLIPScore(I, C)} = max(100 * cos(E_I, E_C), 0)\n\n which corresponds to the cosine similarity between visual CLIP embedding :math:`E_i` for an image :math:`i` and\n textual CLIP embedding :math:`E_C` for an caption :math:`C`. The score is bound between 0 and 100 and the closer\n to 100 the better.\n\n .. note:: Metric is not scriptable\n\n Args:\n model_name_or_path: string indicating the version of the CLIP model to use. Available models are:\n\n - `\"openai/clip-vit-base-patch16\"`\n - `\"openai/clip-vit-base-patch32\"`\n - `\"openai/clip-vit-large-patch14-336\"`\n - `\"openai/clip-vit-large-patch14\"`\n\n kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.\n\n Raises:\n ModuleNotFoundError:\n If transformers package is not installed or version is lower than 4.10.0\n\n Example:\n >>> import torch\n >>> _ = torch.manual_seed(42)\n >>> from torchmetrics.multimodal import CLIPScore\n >>> metric = CLIPScore(model_name_or_path=\"openai/clip-vit-base-patch16\")\n >>> score = metric(torch.randint(255, (3, 224, 224)), \"a photo of a cat\")\n >>> print(score.detach())\n tensor(24.7691)\n\n \"\"\"\n\n is_differentiable: bool = False\n higher_is_better: bool = True\n full_state_update: bool = True\n plot_lower_bound: float = 0.0\n\n score: Tensor\n n_samples: Tensor\n plot_upper_bound = 100.0\n\n def __init__(\n self,\n model_name_or_path: Literal[\n \"openai/clip-vit-base-patch16\",\n \"openai/clip-vit-base-patch32\",\n \"openai/clip-vit-large-patch14-336\",\n \"openai/clip-vit-large-patch14\",\n ] = _DEFAULT_MODEL, # type: ignore[assignment]\n **kwargs: Any,\n ) -> None:\n super().__init__(**kwargs)\n self.model, self.processor = _get_model_and_processor(model_name_or_path)\n self.add_state(\"score\", torch.tensor(0.0), dist_reduce_fx=\"sum\")\n self.add_state(\"n_samples\", torch.tensor(0, dtype=torch.long), dist_reduce_fx=\"sum\")\n\n def update(self, images: Union[Tensor, List[Tensor]], text: Union[str, List[str]]) -> None:\n \"\"\"Update CLIP score on a batch of images and text.\n\n Args:\n images: Either a single [N, C, H, W] tensor or a list of [C, H, W] tensors\n text: Either a single caption or a list of captions\n\n Raises:\n ValueError:\n If not all images have format [C, H, W]\n ValueError:\n If the number of images and captions do not match\n\n \"\"\"\n score, n_samples = _clip_score_update(images, text, self.model, self.processor)\n self.score += score.sum(0)\n self.n_samples += n_samples\n\n def compute(self) -> Tensor:\n \"\"\"Compute accumulated clip score.\"\"\"\n return torch.max(self.score / self.n_samples, torch.zeros_like(self.score))\n\n def plot(self, val: Union[Tensor, Sequence[Tensor], None] = None, ax: Optional[_AX_TYPE] = None) -> _PLOT_OUT_TYPE:\n \"\"\"Plot a single or multiple values from the metric.\n\n Args:\n val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.\n If no value is provided, will automatically call `metric.compute` and plot that result.\n ax: An matplotlib axis object. If provided will add plot to that axis\n\n Returns:\n Figure and Axes object\n\n Raises:\n ModuleNotFoundError:\n If `matplotlib` is not installed\n\n .. 
plot::\n :scale: 75\n\n >>> # Example plotting a single value\n >>> import torch\n >>> from torchmetrics.multimodal import CLIPScore\n >>> metric = CLIPScore(model_name_or_path=\"openai/clip-vit-base-patch16\")\n >>> metric.update(torch.randint(255, (3, 224, 224)), \"a photo of a cat\")\n >>> fig_, ax_ = metric.plot()\n\n .. plot::\n :scale: 75\n\n >>> # Example plotting multiple values\n >>> import torch\n >>> from torchmetrics.multimodal import CLIPScore\n >>> metric = CLIPScore(model_name_or_path=\"openai/clip-vit-base-patch16\")\n >>> values = [ ]\n >>> for _ in range(10):\n ... values.append(metric(torch.randint(255, (3, 224, 224)), \"a photo of a cat\"))\n >>> fig_, ax_ = metric.plot(values)\n\n \"\"\"\n return self._plot(val, ax)\n", "path": "src/torchmetrics/multimodal/clip_score.py"}]} | 2,726 | 405 |
gh_patches_debug_34843 | rasdani/github-patches | git_diff | ARM-DOE__ACT-678 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Handling Incorrect ARM DQRs when applied with ACT function
We have implemented a function to query the ARM Data Quality Report (DQR) database to return time periods when data are flagged. There are three levels of flagging within the DQRs, two of which can replace the variable values with NaN. ARM has a lot of DQRs where the list of variables is not accurate and over-lists the affected variables.
For example, a DQR may indicate the instrument is malfunctioning and select all variables in the netCDF file. While the data values are incorrect, the location, time, and QC variables are correct. Currently we loop over variables and apply the DQR to the variables listed in the DQR. Some of the listed variables are scalars, which causes an exception that we catch and handle. Handling the exception includes a print statement indicating the scalar variable could not be indexed.
We should discuss a better way to handle these variables, as the print statement may confuse users when there is nothing wrong with the scalar variables. Options include:
1) Correctly handling the replacement of data values with NaN for the scalar variables
2) Setting a keyword to enact the print statement, but set to False as default
3) Skipping over these most likely incorrectly listed variables.
An example output for sgpaosccn2colaE13.b1 on 20170903 using D170905.3:
```
Skipping 'eta_lookup_table' DQR application because of IndexError
Skipping 'base_time' DQR application because of IndexError
Skipping 'alt' DQR application because of IndexError
Skipping 'droplet_size_bounds' DQR application because of IndexError
Skipping 'lon' DQR application because of IndexError
Skipping 'lat' DQR application because of IndexError
```
</issue>
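The merged change shown later in this record adds a `skip_location_vars` keyword and only indexes variables that actually have a `time` dimension. The snippet below is a standalone sketch of that idea (roughly option 3 in the issue above) using a made-up two-sample dataset; the variable names and flagged index are assumptions, not real DQR output.

```python
# Sketch of skipping non-time (scalar) variables when applying a DQR's
# time index; data and names are invented for illustration.
import numpy as np
import xarray as xr

times = np.array(["2017-09-03T00:00", "2017-09-03T01:00"], dtype="datetime64[ns]")
ds = xr.Dataset(
    {
        "ccn_concentration": ("time", np.array([100.0, 120.0])),
        "lat": ((), 36.6),            # scalar variable listed in the DQR anyway
    },
    coords={"time": times},
)

dqr_index = np.array([1])             # times flagged by the DQR
for name, var in ds.data_vars.items():
    if "time" not in var.dims:
        continue                      # quietly skip scalars instead of printing
    ds[name].values[dqr_index] = np.nan

print(ds["ccn_concentration"].values, ds["lat"].values)
```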
<code>
[start of act/qc/arm.py]
1 """
2 Functions specifically for working with QC/DQRs from
3 the Atmospheric Radiation Measurement Program (ARM).
4
5 """
6
7 import datetime as dt
8 import numpy as np
9 import requests
10
11 from act.config import DEFAULT_DATASTREAM_NAME
12
13
14 def add_dqr_to_qc(
15 ds,
16 variable=None,
17 assessment='incorrect,suspect',
18 exclude=None,
19 include=None,
20 normalize_assessment=True,
21 cleanup_qc=True,
22 dqr_link=False,
23 ):
24 """
25 Function to query the ARM DQR web service for reports and
26 add as a new quality control test to ancillary quality control
27 variable. If no anicllary quality control variable exist a new
28 one will be created and lined to the data variable through
29 ancillary_variables attribure.
30
31 See online documentation from ARM Data
32 Quality Office on the use of the DQR web service.
33
34 https://code.arm.gov/docs/dqrws-examples/wikis/home
35
36 Information about the DQR web-service avaible at
37 https://adc.arm.gov/dqrws/
38
39 Parameters
40 ----------
41 ds : xarray.Dataset
42 Xarray dataset
43 variable : string, or list of str, or None
44 Variables to check DQR web service. If set to None will
45 attempt to update all variables.
46 assessment : string
47 assessment type to get DQRs. Current options include
48 'missing', 'suspect', 'incorrect' or any combination separated
49 by a comma.
50 exclude : list of strings
51 DQR IDs to exclude from adding into QC
52 include : list of strings
53 List of DQR IDs to include in flagging of data. Any other DQR IDs
54 will be ignored.
55 normalize_assessment : boolean
56 The DQR assessment term is different than the embedded QC
57 term. Embedded QC uses "Bad" and "Indeterminate" while
58 DQRs use "Incorrect" and "Suspect". Setting this will ensure
59 the same terms are used for both.
60 cleanup_qc : boolean
61 Call clean.cleanup() method to convert to standardized ancillary
62 quality control variables. Has a little bit of overhead so
63 if the Dataset has already been cleaned up, no need to run.
64 dqr_link : boolean
65 Prints out a link for each DQR to read the full DQR. Defaults to False
66
67 Returns
68 -------
69 ds : xarray.Dataset
70 Xarray dataset containing new quality control variables
71
72 Examples
73 --------
74 .. code-block:: python
75
76 from act.qc.arm import add_dqr_to_qc
77 ds = add_dqr_to_qc(ds, variable=['temp_mean', 'atmos_pressure'])
78
79
80 """
81
82 # DQR Webservice goes off datastreams, pull from the dataset
83 if 'datastream' in ds.attrs:
84 datastream = ds.attrs['datastream']
85 elif '_datastream' in ds.attrs:
86 datastream = ds.attrs['_datastream']
87 else:
88 raise ValueError('Dataset does not have datastream attribute')
89
90 if datastream == DEFAULT_DATASTREAM_NAME:
91 raise ValueError("'datastream' name required for DQR service set to default value "
92 f"{datastream}. Unable to perform DQR service query.")
93
94 # Clean up QC to conform to CF conventions
95 if cleanup_qc:
96 ds.clean.cleanup()
97
98 # In order to properly flag data, get all variables if None. Exclude QC variables.
99 if variable is None:
100 variable = list(set(ds.data_vars) - set(ds.clean.matched_qc_variables))
101
102 # Check to ensure variable is list
103 if not isinstance(variable, (list, tuple)):
104 variable = [variable]
105
106 # Loop through each variable and call web service for that variable
107 for var_name in variable:
108 # Create URL
109 url = 'http://www.archive.arm.gov/dqrws/ARMDQR?datastream='
110 url += datastream
111 url += '&varname=' + var_name
112 url += ''.join(
113 [
114 '&searchmetric=',
115 assessment,
116 '&dqrfields=dqrid,starttime,endtime,metric,subject',
117 ]
118 )
119
120 # Call web service
121 req = requests.get(url)
122
123 # Check status values and raise error if not successful
124 status = req.status_code
125 if status == 400:
126 raise ValueError('Check parameters')
127 if status == 500:
128 raise ValueError('DQR Webservice Temporarily Down')
129
130 # Get data and run through each dqr
131 dqrs = req.text.splitlines()
132 time = ds['time'].values
133 dqr_results = {}
134 for line in dqrs:
135 line = line.split('|')
136 dqr_no = line[0]
137
138 # Exclude DQRs if in list
139 if exclude is not None and dqr_no in exclude:
140 continue
141
142 # Only include if in include list
143 if include is not None and dqr_no not in include:
144 continue
145
146 starttime = np.datetime64(dt.datetime.utcfromtimestamp(int(line[1])))
147 endtime = np.datetime64(dt.datetime.utcfromtimestamp(int(line[2])))
148 ind = np.where((time >= starttime) & (time <= endtime))
149 if ind[0].size == 0:
150 continue
151
152 if dqr_no in dqr_results.keys():
153 dqr_results[dqr_no]['index'] = np.append(dqr_results[dqr_no]['index'], ind)
154 else:
155 dqr_results[dqr_no] = {
156 'index': ind,
157 'test_assessment': line[3],
158 'test_meaning': ': '.join([dqr_no, line[-1]]),
159 }
160 if dqr_link:
161 print_url = 'https://adc.arm.gov/ArchiveServices/DQRService?dqrid=' + str(dqr_no)
162 print(dqr_no, '-', line[3], ':', print_url)
163 for key, value in dqr_results.items():
164 try:
165 ds.qcfilter.add_test(
166 var_name,
167 index=value['index'],
168 test_meaning=value['test_meaning'],
169 test_assessment=value['test_assessment'],
170 )
171 except IndexError:
172 print(f"Skipping '{var_name}' DQR application because of IndexError")
173
174 if normalize_assessment:
175 ds.clean.normalize_assessment(variables=var_name)
176
177 return ds
178
[end of act/qc/arm.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/act/qc/arm.py b/act/qc/arm.py
--- a/act/qc/arm.py
+++ b/act/qc/arm.py
@@ -20,6 +20,7 @@
normalize_assessment=True,
cleanup_qc=True,
dqr_link=False,
+ skip_location_vars=False,
):
"""
Function to query the ARM DQR web service for reports and
@@ -63,6 +64,9 @@
if the Dataset has already been cleaned up, no need to run.
dqr_link : boolean
Prints out a link for each DQR to read the full DQR. Defaults to False
+ skip_location_vars : boolean
+ Does not apply DQRs to location variables. This can be useful in the event
+ the submitter has erroneously selected all variables.
Returns
-------
@@ -104,7 +108,11 @@
variable = [variable]
# Loop through each variable and call web service for that variable
+ loc_vars = ['lat', 'lon', 'alt', 'latitude', 'longitude', 'altitude']
for var_name in variable:
+ if skip_location_vars:
+ if var_name in loc_vars:
+ continue
# Create URL
url = 'http://www.archive.arm.gov/dqrws/ARMDQR?datastream='
url += datastream
@@ -146,9 +154,13 @@
starttime = np.datetime64(dt.datetime.utcfromtimestamp(int(line[1])))
endtime = np.datetime64(dt.datetime.utcfromtimestamp(int(line[2])))
ind = np.where((time >= starttime) & (time <= endtime))
+
if ind[0].size == 0:
continue
+ if 'time' not in ds[var_name].dims:
+ ind = np.where((ds[var_name].values == ds[var_name].values) | (np.isnan(ds[var_name].values)))
+
if dqr_no in dqr_results.keys():
dqr_results[dqr_no]['index'] = np.append(dqr_results[dqr_no]['index'], ind)
else:
| {"golden_diff": "diff --git a/act/qc/arm.py b/act/qc/arm.py\n--- a/act/qc/arm.py\n+++ b/act/qc/arm.py\n@@ -20,6 +20,7 @@\n normalize_assessment=True,\n cleanup_qc=True,\n dqr_link=False,\n+ skip_location_vars=False,\n ):\n \"\"\"\n Function to query the ARM DQR web service for reports and\n@@ -63,6 +64,9 @@\n if the Dataset has already been cleaned up, no need to run.\n dqr_link : boolean\n Prints out a link for each DQR to read the full DQR. Defaults to False\n+ skip_location_vars : boolean\n+ Does not apply DQRs to location variables. This can be useful in the event\n+ the submitter has erroneously selected all variables.\n \n Returns\n -------\n@@ -104,7 +108,11 @@\n variable = [variable]\n \n # Loop through each variable and call web service for that variable\n+ loc_vars = ['lat', 'lon', 'alt', 'latitude', 'longitude', 'altitude']\n for var_name in variable:\n+ if skip_location_vars:\n+ if var_name in loc_vars:\n+ continue\n # Create URL\n url = 'http://www.archive.arm.gov/dqrws/ARMDQR?datastream='\n url += datastream\n@@ -146,9 +154,13 @@\n starttime = np.datetime64(dt.datetime.utcfromtimestamp(int(line[1])))\n endtime = np.datetime64(dt.datetime.utcfromtimestamp(int(line[2])))\n ind = np.where((time >= starttime) & (time <= endtime))\n+\n if ind[0].size == 0:\n continue\n \n+ if 'time' not in ds[var_name].dims:\n+ ind = np.where((ds[var_name].values == ds[var_name].values) | (np.isnan(ds[var_name].values)))\n+\n if dqr_no in dqr_results.keys():\n dqr_results[dqr_no]['index'] = np.append(dqr_results[dqr_no]['index'], ind)\n else:\n", "issue": "Handling Incorrect ARM DQRs when applied with ACT function\nWe have implemented a function to query the ARM Data Quality Report database to return time periods when data is flagged. There are three levels of flagging within the DQRs, with two of them able to replace the variable values with NaN. ARM has a lot of DQRs where the list of variables is not accurate, and is over listing the affected variables.\r\n\r\nFor example a DQR may indicate the instrument is malfunctioning and select all variables in the netCDF file. While the data is incorrect the location, time, and QC variables are correct. Currently we loop over variables and apply the DQR to variables listed in the DQR. Some variables listed are scalars which is causing an exception that we catch and handle. Handling the exception has a print statement indicating the scalar variable could not be indexed.\r\n\r\nWe should discuss a better way to handle these variables as the print statement may confuse users when there is nothing wrong with the scalar variables. 
A couple of options include:\r\n\r\n1) Correctly handling the replacement of data values with NaN for the scalar variables\r\n2) Setting a keyword to enact the print statement, but set to False as default\r\n3) Skip over these most likely incorrectly listed variables.\r\n\r\nAn example output for sgpaosccn2colaE13.b1 on 20170903 using D170905.3\r\n```\r\nSkipping 'eta_lookup_table' DQR application because of IndexError\r\nSkipping 'base_time' DQR application because of IndexError\r\nSkipping 'alt' DQR application because of IndexError\r\nSkipping 'droplet_size_bounds' DQR application because of IndexError\r\nSkipping 'lon' DQR application because of IndexError\r\nSkipping 'lat' DQR application because of IndexError\r\n```\n", "before_files": [{"content": "\"\"\"\nFunctions specifically for working with QC/DQRs from\nthe Atmospheric Radiation Measurement Program (ARM).\n\n\"\"\"\n\nimport datetime as dt\nimport numpy as np\nimport requests\n\nfrom act.config import DEFAULT_DATASTREAM_NAME\n\n\ndef add_dqr_to_qc(\n ds,\n variable=None,\n assessment='incorrect,suspect',\n exclude=None,\n include=None,\n normalize_assessment=True,\n cleanup_qc=True,\n dqr_link=False,\n):\n \"\"\"\n Function to query the ARM DQR web service for reports and\n add as a new quality control test to ancillary quality control\n variable. If no anicllary quality control variable exist a new\n one will be created and lined to the data variable through\n ancillary_variables attribure.\n\n See online documentation from ARM Data\n Quality Office on the use of the DQR web service.\n\n https://code.arm.gov/docs/dqrws-examples/wikis/home\n\n Information about the DQR web-service avaible at\n https://adc.arm.gov/dqrws/\n\n Parameters\n ----------\n ds : xarray.Dataset\n Xarray dataset\n variable : string, or list of str, or None\n Variables to check DQR web service. If set to None will\n attempt to update all variables.\n assessment : string\n assessment type to get DQRs. Current options include\n 'missing', 'suspect', 'incorrect' or any combination separated\n by a comma.\n exclude : list of strings\n DQR IDs to exclude from adding into QC\n include : list of strings\n List of DQR IDs to include in flagging of data. Any other DQR IDs\n will be ignored.\n normalize_assessment : boolean\n The DQR assessment term is different than the embedded QC\n term. Embedded QC uses \"Bad\" and \"Indeterminate\" while\n DQRs use \"Incorrect\" and \"Suspect\". Setting this will ensure\n the same terms are used for both.\n cleanup_qc : boolean\n Call clean.cleanup() method to convert to standardized ancillary\n quality control variables. Has a little bit of overhead so\n if the Dataset has already been cleaned up, no need to run.\n dqr_link : boolean\n Prints out a link for each DQR to read the full DQR. Defaults to False\n\n Returns\n -------\n ds : xarray.Dataset\n Xarray dataset containing new quality control variables\n\n Examples\n --------\n .. code-block:: python\n\n from act.qc.arm import add_dqr_to_qc\n ds = add_dqr_to_qc(ds, variable=['temp_mean', 'atmos_pressure'])\n\n\n \"\"\"\n\n # DQR Webservice goes off datastreams, pull from the dataset\n if 'datastream' in ds.attrs:\n datastream = ds.attrs['datastream']\n elif '_datastream' in ds.attrs:\n datastream = ds.attrs['_datastream']\n else:\n raise ValueError('Dataset does not have datastream attribute')\n\n if datastream == DEFAULT_DATASTREAM_NAME:\n raise ValueError(\"'datastream' name required for DQR service set to default value \"\n f\"{datastream}. 
Unable to perform DQR service query.\")\n\n # Clean up QC to conform to CF conventions\n if cleanup_qc:\n ds.clean.cleanup()\n\n # In order to properly flag data, get all variables if None. Exclude QC variables.\n if variable is None:\n variable = list(set(ds.data_vars) - set(ds.clean.matched_qc_variables))\n\n # Check to ensure variable is list\n if not isinstance(variable, (list, tuple)):\n variable = [variable]\n\n # Loop through each variable and call web service for that variable\n for var_name in variable:\n # Create URL\n url = 'http://www.archive.arm.gov/dqrws/ARMDQR?datastream='\n url += datastream\n url += '&varname=' + var_name\n url += ''.join(\n [\n '&searchmetric=',\n assessment,\n '&dqrfields=dqrid,starttime,endtime,metric,subject',\n ]\n )\n\n # Call web service\n req = requests.get(url)\n\n # Check status values and raise error if not successful\n status = req.status_code\n if status == 400:\n raise ValueError('Check parameters')\n if status == 500:\n raise ValueError('DQR Webservice Temporarily Down')\n\n # Get data and run through each dqr\n dqrs = req.text.splitlines()\n time = ds['time'].values\n dqr_results = {}\n for line in dqrs:\n line = line.split('|')\n dqr_no = line[0]\n\n # Exclude DQRs if in list\n if exclude is not None and dqr_no in exclude:\n continue\n\n # Only include if in include list\n if include is not None and dqr_no not in include:\n continue\n\n starttime = np.datetime64(dt.datetime.utcfromtimestamp(int(line[1])))\n endtime = np.datetime64(dt.datetime.utcfromtimestamp(int(line[2])))\n ind = np.where((time >= starttime) & (time <= endtime))\n if ind[0].size == 0:\n continue\n\n if dqr_no in dqr_results.keys():\n dqr_results[dqr_no]['index'] = np.append(dqr_results[dqr_no]['index'], ind)\n else:\n dqr_results[dqr_no] = {\n 'index': ind,\n 'test_assessment': line[3],\n 'test_meaning': ': '.join([dqr_no, line[-1]]),\n }\n if dqr_link:\n print_url = 'https://adc.arm.gov/ArchiveServices/DQRService?dqrid=' + str(dqr_no)\n print(dqr_no, '-', line[3], ':', print_url)\n for key, value in dqr_results.items():\n try:\n ds.qcfilter.add_test(\n var_name,\n index=value['index'],\n test_meaning=value['test_meaning'],\n test_assessment=value['test_assessment'],\n )\n except IndexError:\n print(f\"Skipping '{var_name}' DQR application because of IndexError\")\n\n if normalize_assessment:\n ds.clean.normalize_assessment(variables=var_name)\n\n return ds\n", "path": "act/qc/arm.py"}]} | 2,708 | 475 |
gh_patches_debug_24209 | rasdani/github-patches | git_diff | nilearn__nilearn-3969 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[DOC] order of contributors
Adding the CITATION.cff file makes it possible to "cite" nilearn from the GitHub UI in BibTeX or APA format.
See below.
By default it lists contributors in the order they appear in the CITATION.cff file: alphabetically, for now.
Opening an issue to discuss whether we want a different order and, if so, which one.
```bibtex
@software{Chamma_nilearn,
author = {Chamma, Ahmad and Frau-Pascual, Aina and Rothberg, Alex and Abadie, Alexandre and Abraham, Alexandre and Gramfort, Alexandre and Savio, Alexandre and Thual, Alexis and Kanaan, Amadeus and Pinho, Ana Luisa and Idrobo, Andrés Hoyos and Kieslinger, Anne-Sophie and Rokem, Ariel and Mensch, Arthur and Duran, Audrey and Cipollini, Ben and Thirion, Bertrand and Nguyen, Binh and Cakan, Caglar and Gorgolewski, Chris and Markiewicz, Chris and Horea, Christian and Gerloff, Christian and Reininger, Colin and Lane, Connor and Delettre, Céline and Gale, Dan and Gomez, Daniel and Bzdok, Danilo and Ellis, David G and Wassermann, Demian and Pisner, Derek and Orfanos, Dimitri Papadopoulos and DuPre, Elizabeth and Dohmatob, Elvis and Larson, Eric and Edmond, Evan and Pedregosa, Fabian and Liem, Franz and Varoquaux, Gael and Hollander, Gilles de and Kiar, Greg and Gilmore, Greydon and Lemaitre, Guillaume and Wang, Hao-Ting and Aggarwal, Himanshu and Abenes, Ian and Vogel, Jake and Margeta, Jan and Grobler, Jaques and Gors, Jason and Rasero, Javier and Kossaifi, Jean and King, Jean-Rémi and Dalenberg, Jelle Roelof and Lefort-Besnard, Jeremy and Dockes, Jerome and Chevalier, Jerome-Alexis and Wiesner, Johannes and Gorrono, Jon Haitz Legarreta and Sassenhagen, Jona and Teves, Joshua and Huntenburg, Julia and Peraza, Julio A and Daddy, Kamalakar Reddy and Sitek, Kevin and Helwegen, Koen and Shmelkov, Konstantin and Chawla, Kshitij and CHEN, Kun and Sasse, Leonard and Estève, Loic and Tetrel, Loic and Paz, Luz and Pietrantoni, Manon and Perez-Guevara, Martin and Wegrzyn, Martin and Goncalves, Mathias and Ekman, Matthias and Joulot, Matthieu and Rahim, Mehdi and Eickenberg, Michael and Hanke, Michael and Notter, Michael and Waskom, Michael and Wang, Michelle and Torabi, Mohammad and Boos, Moritz and Song, Myeong Seop and Clarke, Natasha and Shah, Neelay and Gensollen, Nicolas and Esteban, Oscar and Bogdan, Paul and Sanz-Leon, Paula and Herholz, Peer and Gervais, Philippe and Bellec, Pierre and Glaser, Pierre and Quirion, Pierre-Olivier and Raamana, Pradeep Reddy and Meudec, Raphael and Luke, Robert and Williamson, Robert and Guidotti, Roberto and Phlypo, Ronald and Hammonds, Ryan and Gau, Rémi and Patalasingh, Sachin and Hahn, Sage and Bougacha, Salma and Johnson, Sam Buck and Jawhar, Sami and Steinkamp, Simon and Singh, Sourav and Meisler, Steven and Lan, Sylvain and Takerkart, Sylvain and Samanta, Tarun and Salo, Taylor and Bazeille, Thomas and Vanasse, Tom and Diogo, Vasco and Michel, Vincent and Fritsch, Virgile and Halchenko, Yaroslav and Mzayek, Yasmin and Baratz, Zvi and Nájera, Óscar},
license = {BSD-4-Clause},
title = {{nilearn}},
url = {https://github.com/nilearn/nilearn}
}
```
</issue>
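For context, a minimal sketch of the behaviour the issue describes: citation tools emit the `authors` list in the order it appears in CITATION.cff, so file order is citation order. The author entries below are hypothetical placeholders, not taken from the real nilearn file.

```python
# Sketch: the order of entries under "authors" in CITATION.cff is the order
# that citation tooling (e.g. GitHub's "Cite this repository" box) reproduces.
import ruamel.yaml

yaml = ruamel.yaml.YAML()
snippet = """\
authors:
  - family-names: Thirion
    given-names: Bertrand
  - family-names: Thual
    given-names: Alexis
"""
citation = yaml.load(snippet)
# File order is preserved: ['Thirion', 'Thual']
print([author["family-names"] for author in citation["authors"]])
```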
<code>
[start of maint_tools/citation_cff_maint.py]
1 """Update AUTHORS and names from CITATION.cff file."""
2 from __future__ import annotations
3
4 from pathlib import Path
5 from typing import Any
6
7 import ruamel.yaml
8
9 yaml = ruamel.yaml.YAML()
10 yaml.indent(mapping=2, sequence=4, offset=2)
11
12 CORE_DEVS = [
13 "Alexis Thual",
14 "Bertrand Thirion",
15 "Elizabeth DuPre",
16 "Hao-Ting Wang",
17 "Jerome Dockes",
18 "Nicolas Gensollen",
19 "Rémi Gau",
20 "Taylor Salo",
21 "Yasmin Mzayek",
22 ]
23
24
25 def root_dir() -> Path:
26 """Return path to root directory."""
27 return Path(__file__).parent.parent
28
29
30 def names_rst() -> Path:
31 """Return path to names.rst file."""
32 return root_dir() / "doc" / "changes" / "names.rst"
33
34
35 def citation_file() -> Path:
36 """Return path to CITATIONS.cff file."""
37 return root_dir() / "CITATION.cff"
38
39
40 def authors_file() -> Path:
41 """Return path to AUTHORS.rst file."""
42 return root_dir() / "AUTHORS.rst"
43
44
45 def read_citation_cff() -> dict[str, Any]:
46 """Read CITATION.cff file."""
47 print(f"Reading file: {citation_file()}")
48 with open(citation_file(), encoding="utf8") as f:
49 citation = yaml.load(f)
50 return citation
51
52
53 def write_citation_cff(citation: dict[str, Any]) -> None:
54 """Write CITATION.cff file."""
55 print(f"Writing file: {citation_file()}")
56 with open(citation_file(), "w", encoding="utf8") as f:
57 yaml.dump(citation, f)
58
59
60 def write_names_rst(citation: list[dict[str, str]]) -> None:
61 """Write names.rst file."""
62 print(f"Writing file: {names_rst()}")
63 with open(names_rst(), "w", encoding="utf8") as f:
64 header = """.. This file is automatically generated.
65 Do not edit manually.
66 If you want to add to add yourself to the list of authors,
67 please edit CITATION.cff and run maint_tools/citation_cff_maint.py.
68
69 """
70 print(header, file=f)
71
72 for i, author in enumerate(citation["authors"]):
73 line = (
74 f'.. _{author["given-names"]} {author["family-names"]}: '
75 f'{author["website"]}'
76 )
77 print(line, file=f)
78 if i < len(citation["authors"]) - 1:
79 print("", file=f)
80
81
82 def read_authors_file() -> list[str]:
83 """Read AUTHORS.rst file."""
84 print(f"Reading file: {authors_file()}")
85 with open(authors_file(), encoding="utf8") as f:
86 authors_file_content = f.readlines()
87 return authors_file_content
88
89
90 def write_authors_file(authors: list[dict[str, str]]) -> None:
91 """Write AUTHORS.rst file."""
92 authors_file_content = read_authors_file()
93 print(f"Writing file: {authors_file()}")
94 with open(authors_file(), "w", encoding="utf8") as f:
95 writing_team_section = False
96 for line in authors_file_content:
97 if ".. CORE DEV SECTION STARTS HERE" in line:
98 writing_team_section = True
99 write_team_section(f, authors)
100 if "Funding" in line:
101 writing_team_section = False
102 if not writing_team_section:
103 f.write(line)
104
105
106 def write_team_section(f, authors: list[dict[str, str]]) -> None:
107 """Write team section."""
108 print(" Updating team section")
109 f.write(
110 """.. CORE DEV SECTION STARTS HERE
111 The Core developers section is added automatically
112 and should not be edited manually.
113
114 .. _core_devs:
115
116 Core developers
117 ...............
118
119 The nilearn core developers are:
120
121 """
122 )
123
124 write_core_devs(f)
125
126 f.write(
127 """.. CORE DEV SECTION ENDS HERE
128 """
129 )
130
131 f.write(
132 """
133 .. OTHER CONTRIBUTION SECTION STARTS HERE
134 The Other contributors section is added automatically
135 and should not be edited manually.
136
137 Other contributors
138 ..................
139
140 Some other past or present contributors are:
141
142 """
143 )
144 for author_ in authors:
145 f.write(f"* `{author_['given-names']} {author_['family-names']}`_")
146 if author_.get("affiliation"):
147 f.write(f": {author_['affiliation']}")
148 f.write("\n")
149
150 f.write(
151 """
152 .. OTHER CONTRIBUTION SECTION ENDS HERE
153
154 """
155 )
156
157
158 def write_core_devs(f):
159 """Add core devs."""
160 for dev in CORE_DEVS:
161 f.write(f"* `{dev}`_\n")
162 f.write("\n")
163
164
165 def sort_authors(authors: list[dict[str, str]]) -> list[dict[str, str]]:
166 """Sort authors by given name."""
167 print(" Sorting authors by given name")
168 authors.sort(key=lambda x: x["given-names"])
169 return authors
170
171
172 def count_authors() -> int:
173 """Count authors in names.rst."""
174 nb_authors = 0
175 with open(names_rst(), encoding="utf8") as f:
176 # count authors
177 lines = f.readlines()
178 for line in lines:
179 if line.startswith(".. _"):
180 nb_authors += 1
181 return nb_authors
182
183
184 def main():
185 """Update names.rst and AUTHORS.rst files."""
186 citation = read_citation_cff()
187 citation["authors"] = sort_authors(citation["authors"])
188
189 nb_authors = count_authors()
190 write_names_rst(citation)
191 new_nb_authors = count_authors()
192 # Sanity check to make sure we have not lost anyone
193 assert nb_authors <= new_nb_authors
194
195 write_citation_cff(citation)
196
197 write_authors_file(citation["authors"])
198
199
200 if __name__ == "__main__":
201 main()
202
[end of maint_tools/citation_cff_maint.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/maint_tools/citation_cff_maint.py b/maint_tools/citation_cff_maint.py
--- a/maint_tools/citation_cff_maint.py
+++ b/maint_tools/citation_cff_maint.py
@@ -181,9 +181,25 @@
return nb_authors
+def remove_consortium(authors: list[dict[str, str]]) -> list[dict[str, str]]:
+ """Remove consortium from authors."""
+ authors = [
+ author
+ for author in authors
+ if author["family-names"] != "Nilearn contributors"
+ ]
+ return authors
+
+
+def add_consortium(authors: list[dict[str, str]]) -> list[dict[str, str]]:
+ """Add consortium to authors."""
+ return [{"family-names": "Nilearn contributors"}] + authors
+
+
def main():
"""Update names.rst and AUTHORS.rst files."""
citation = read_citation_cff()
+ citation["authors"] = remove_consortium(citation["authors"])
citation["authors"] = sort_authors(citation["authors"])
nb_authors = count_authors()
@@ -192,10 +208,11 @@
# Sanity check to make sure we have not lost anyone
assert nb_authors <= new_nb_authors
- write_citation_cff(citation)
-
write_authors_file(citation["authors"])
+ citation["authors"] = add_consortium(citation["authors"])
+ write_citation_cff(citation)
+
if __name__ == "__main__":
main()
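A minimal sketch of the ordering the diff above produces, using hypothetical author entries rather than the record's real data: contributors are sorted alphabetically by given name, and a single consortium entry is prepended before the file is written back.

```python
# Net effect of remove_consortium / sort_authors / add_consortium in the patch above.
authors = [
    {"given-names": "Bertrand", "family-names": "Thirion"},
    {"family-names": "Nilearn contributors"},  # consortium entry, no given name
    {"given-names": "Alexis", "family-names": "Thual"},
]

# Drop the consortium entry so it does not interfere with sorting.
authors = [a for a in authors if a["family-names"] != "Nilearn contributors"]
# Sort the remaining contributors alphabetically by given name.
authors.sort(key=lambda a: a["given-names"])
# Put the consortium entry back at the top before writing CITATION.cff.
authors = [{"family-names": "Nilearn contributors"}] + authors

print([a.get("given-names", a["family-names"]) for a in authors])
# ['Nilearn contributors', 'Alexis', 'Bertrand']
```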
| {"golden_diff": "diff --git a/maint_tools/citation_cff_maint.py b/maint_tools/citation_cff_maint.py\n--- a/maint_tools/citation_cff_maint.py\n+++ b/maint_tools/citation_cff_maint.py\n@@ -181,9 +181,25 @@\n return nb_authors\n \n \n+def remove_consortium(authors: list[dict[str, str]]) -> list[dict[str, str]]:\n+ \"\"\"Remove consortium from authors.\"\"\"\n+ authors = [\n+ author\n+ for author in authors\n+ if author[\"family-names\"] != \"Nilearn contributors\"\n+ ]\n+ return authors\n+\n+\n+def add_consortium(authors: list[dict[str, str]]) -> list[dict[str, str]]:\n+ \"\"\"Add consortium to authors.\"\"\"\n+ return [{\"family-names\": \"Nilearn contributors\"}] + authors\n+\n+\n def main():\n \"\"\"Update names.rst and AUTHORS.rst files.\"\"\"\n citation = read_citation_cff()\n+ citation[\"authors\"] = remove_consortium(citation[\"authors\"])\n citation[\"authors\"] = sort_authors(citation[\"authors\"])\n \n nb_authors = count_authors()\n@@ -192,10 +208,11 @@\n # Sanity check to make sure we have not lost anyone\n assert nb_authors <= new_nb_authors\n \n- write_citation_cff(citation)\n-\n write_authors_file(citation[\"authors\"])\n \n+ citation[\"authors\"] = add_consortium(citation[\"authors\"])\n+ write_citation_cff(citation)\n+\n \n if __name__ == \"__main__\":\n main()\n", "issue": "[DOC] order of contributors\nAdding the citation.cff allows to \"cite\" nilearn from the github UI in bibtex or APA format. \r\nSee below.\r\n\r\nBy default it will list contributors in the order they appear in the citation.cff file: so alphabetically for now.\r\n\r\nOpening an issue to discuss if we want a different order and if so which one.\r\n\r\n\r\n```bibtex\r\n@software{Chamma_nilearn,\r\nauthor = {Chamma, Ahmad and Frau-Pascual, Aina and Rothberg, Alex and Abadie, Alexandre and Abraham, Alexandre and Gramfort, Alexandre and Savio, Alexandre and Thual, Alexis and Kanaan, Amadeus and Pinho, Ana Luisa and Idrobo, Andr\u00e9s Hoyos and Kieslinger, Anne-Sophie and Rokem, Ariel and Mensch, Arthur and Duran, Audrey and Cipollini, Ben and Thirion, Bertrand and Nguyen, Binh and Cakan, Caglar and Gorgolewski, Chris and Markiewicz, Chris and Horea, Christian and Gerloff, Christian and Reininger, Colin and Lane, Connor and Delettre, C\u00e9line and Gale, Dan and Gomez, Daniel and Bzdok, Danilo and Ellis, David G and Wassermann, Demian and Pisner, Derek and Orfanos, Dimitri Papadopoulos and DuPre, Elizabeth and Dohmatob, Elvis and Larson, Eric and Edmond, Evan and Pedregosa, Fabian and Liem, Franz and Varoquaux, Gael and Hollander, Gilles de and Kiar, Greg and Gilmore, Greydon and Lemaitre, Guillaume and Wang, Hao-Ting and Aggarwal, Himanshu and Abenes, Ian and Vogel, Jake and Margeta, Jan and Grobler, Jaques and Gors, Jason and Rasero, Javier and Kossaifi, Jean and King, Jean-R\u00e9mi and Dalenberg, Jelle Roelof and Lefort-Besnard, Jeremy and Dockes, Jerome and Chevalier, Jerome-Alexis and Wiesner, Johannes and Gorrono, Jon Haitz Legarreta and Sassenhagen, Jona and Teves, Joshua and Huntenburg, Julia and Peraza, Julio A and Daddy, Kamalakar Reddy and Sitek, Kevin and Helwegen, Koen and Shmelkov, Konstantin and Chawla, Kshitij and CHEN, Kun and Sasse, Leonard and Est\u00e8ve, Loic and Tetrel, Loic and Paz, Luz and Pietrantoni, Manon and Perez-Guevara, Martin and Wegrzyn, Martin and Goncalves, Mathias and Ekman, Matthias and Joulot, Matthieu and Rahim, Mehdi and Eickenberg, Michael and Hanke, Michael and Notter, Michael and Waskom, Michael and Wang, Michelle and Torabi, Mohammad and Boos, Moritz and Song, 
Myeong Seop and Clarke, Natasha and Shah, Neelay and Gensollen, Nicolas and Esteban, Oscar and Bogdan, Paul and Sanz-Leon, Paula and Herholz, Peer and Gervais, Philippe and Bellec, Pierre and Glaser, Pierre and Quirion, Pierre-Olivier and Raamana, Pradeep Reddy and Meudec, Raphael and Luke, Robert and Williamson, Robert and Guidotti, Roberto and Phlypo, Ronald and Hammonds, Ryan and Gau, R\u00e9mi and Patalasingh, Sachin and Hahn, Sage and Bougacha, Salma and Johnson, Sam Buck and Jawhar, Sami and Steinkamp, Simon and Singh, Sourav and Meisler, Steven and Lan, Sylvain and Takerkart, Sylvain and Samanta, Tarun and Salo, Taylor and Bazeille, Thomas and Vanasse, Tom and Diogo, Vasco and Michel, Vincent and Fritsch, Virgile and Halchenko, Yaroslav and Mzayek, Yasmin and Baratz, Zvi and N\u00e1jera, \u00d3scar},\r\nlicense = {BSD-4-Clause},\r\ntitle = {{nilearn}},\r\nurl = {https://github.com/nilearn/nilearn}\r\n}\r\n```\n", "before_files": [{"content": "\"\"\"Update AUTHORS and names from CITATION.cff file.\"\"\"\nfrom __future__ import annotations\n\nfrom pathlib import Path\nfrom typing import Any\n\nimport ruamel.yaml\n\nyaml = ruamel.yaml.YAML()\nyaml.indent(mapping=2, sequence=4, offset=2)\n\nCORE_DEVS = [\n \"Alexis Thual\",\n \"Bertrand Thirion\",\n \"Elizabeth DuPre\",\n \"Hao-Ting Wang\",\n \"Jerome Dockes\",\n \"Nicolas Gensollen\",\n \"R\u00e9mi Gau\",\n \"Taylor Salo\",\n \"Yasmin Mzayek\",\n]\n\n\ndef root_dir() -> Path:\n \"\"\"Return path to root directory.\"\"\"\n return Path(__file__).parent.parent\n\n\ndef names_rst() -> Path:\n \"\"\"Return path to names.rst file.\"\"\"\n return root_dir() / \"doc\" / \"changes\" / \"names.rst\"\n\n\ndef citation_file() -> Path:\n \"\"\"Return path to CITATIONS.cff file.\"\"\"\n return root_dir() / \"CITATION.cff\"\n\n\ndef authors_file() -> Path:\n \"\"\"Return path to AUTHORS.rst file.\"\"\"\n return root_dir() / \"AUTHORS.rst\"\n\n\ndef read_citation_cff() -> dict[str, Any]:\n \"\"\"Read CITATION.cff file.\"\"\"\n print(f\"Reading file: {citation_file()}\")\n with open(citation_file(), encoding=\"utf8\") as f:\n citation = yaml.load(f)\n return citation\n\n\ndef write_citation_cff(citation: dict[str, Any]) -> None:\n \"\"\"Write CITATION.cff file.\"\"\"\n print(f\"Writing file: {citation_file()}\")\n with open(citation_file(), \"w\", encoding=\"utf8\") as f:\n yaml.dump(citation, f)\n\n\ndef write_names_rst(citation: list[dict[str, str]]) -> None:\n \"\"\"Write names.rst file.\"\"\"\n print(f\"Writing file: {names_rst()}\")\n with open(names_rst(), \"w\", encoding=\"utf8\") as f:\n header = \"\"\".. This file is automatically generated.\n Do not edit manually.\n If you want to add to add yourself to the list of authors,\n please edit CITATION.cff and run maint_tools/citation_cff_maint.py.\n\n\"\"\"\n print(header, file=f)\n\n for i, author in enumerate(citation[\"authors\"]):\n line = (\n f'.. 
_{author[\"given-names\"]} {author[\"family-names\"]}: '\n f'{author[\"website\"]}'\n )\n print(line, file=f)\n if i < len(citation[\"authors\"]) - 1:\n print(\"\", file=f)\n\n\ndef read_authors_file() -> list[str]:\n \"\"\"Read AUTHORS.rst file.\"\"\"\n print(f\"Reading file: {authors_file()}\")\n with open(authors_file(), encoding=\"utf8\") as f:\n authors_file_content = f.readlines()\n return authors_file_content\n\n\ndef write_authors_file(authors: list[dict[str, str]]) -> None:\n \"\"\"Write AUTHORS.rst file.\"\"\"\n authors_file_content = read_authors_file()\n print(f\"Writing file: {authors_file()}\")\n with open(authors_file(), \"w\", encoding=\"utf8\") as f:\n writing_team_section = False\n for line in authors_file_content:\n if \".. CORE DEV SECTION STARTS HERE\" in line:\n writing_team_section = True\n write_team_section(f, authors)\n if \"Funding\" in line:\n writing_team_section = False\n if not writing_team_section:\n f.write(line)\n\n\ndef write_team_section(f, authors: list[dict[str, str]]) -> None:\n \"\"\"Write team section.\"\"\"\n print(\" Updating team section\")\n f.write(\n \"\"\".. CORE DEV SECTION STARTS HERE\n The Core developers section is added automatically\n and should not be edited manually.\n\n.. _core_devs:\n\nCore developers\n...............\n\nThe nilearn core developers are:\n\n\"\"\"\n )\n\n write_core_devs(f)\n\n f.write(\n \"\"\".. CORE DEV SECTION ENDS HERE\n\"\"\"\n )\n\n f.write(\n \"\"\"\n.. OTHER CONTRIBUTION SECTION STARTS HERE\n The Other contributors section is added automatically\n and should not be edited manually.\n\nOther contributors\n..................\n\nSome other past or present contributors are:\n\n\"\"\"\n )\n for author_ in authors:\n f.write(f\"* `{author_['given-names']} {author_['family-names']}`_\")\n if author_.get(\"affiliation\"):\n f.write(f\": {author_['affiliation']}\")\n f.write(\"\\n\")\n\n f.write(\n \"\"\"\n.. OTHER CONTRIBUTION SECTION ENDS HERE\n\n\"\"\"\n )\n\n\ndef write_core_devs(f):\n \"\"\"Add core devs.\"\"\"\n for dev in CORE_DEVS:\n f.write(f\"* `{dev}`_\\n\")\n f.write(\"\\n\")\n\n\ndef sort_authors(authors: list[dict[str, str]]) -> list[dict[str, str]]:\n \"\"\"Sort authors by given name.\"\"\"\n print(\" Sorting authors by given name\")\n authors.sort(key=lambda x: x[\"given-names\"])\n return authors\n\n\ndef count_authors() -> int:\n \"\"\"Count authors in names.rst.\"\"\"\n nb_authors = 0\n with open(names_rst(), encoding=\"utf8\") as f:\n # count authors\n lines = f.readlines()\n for line in lines:\n if line.startswith(\".. _\"):\n nb_authors += 1\n return nb_authors\n\n\ndef main():\n \"\"\"Update names.rst and AUTHORS.rst files.\"\"\"\n citation = read_citation_cff()\n citation[\"authors\"] = sort_authors(citation[\"authors\"])\n\n nb_authors = count_authors()\n write_names_rst(citation)\n new_nb_authors = count_authors()\n # Sanity check to make sure we have not lost anyone\n assert nb_authors <= new_nb_authors\n\n write_citation_cff(citation)\n\n write_authors_file(citation[\"authors\"])\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "maint_tools/citation_cff_maint.py"}]} | 3,292 | 363 |